
[RFC,v4,30/71] cpu: define cpu_interrupt_request helpers

Message ID 20181025144644.15464-30-cota@braap.org
State New
Series [RFC,v4,01/71] cpu: convert queued work to a QSIMPLEQ

Commit Message

Emilio Cota Oct. 25, 2018, 2:46 p.m. UTC
Add a comment about how atomic_read works here. The comment refers to
a "BQL-less CPU loop", which will materialize toward the end
of this series.

Note that the modifications to cpu_reset_interrupt are there to
avoid deadlock during the CPU lock transition; once that is complete,
cpu_interrupt_request will be simple again.

Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 include/qom/cpu.h | 37 +++++++++++++++++++++++++++++++++++++
 qom/cpu.c         | 27 +++++++++++++++++++++------
 2 files changed, 58 insertions(+), 6 deletions(-)
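To see why the transition needs care: the hazard is a classic lock-order
inversion, since the BQL must always be taken before a per-CPU lock. A
standalone sketch of the safe ordering (plain pthreads with hypothetical
names, not QEMU code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the BQL and one per-CPU lock. The invariant: a thread
 * that needs both takes bql first, then cpu_lock. This is why
 * cpu_reset_interrupt below drops an already-held CPU lock before
 * taking the BQL -- it must never hold the two in reverse order. */
static pthread_mutex_t bql = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cpu_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned interrupt_request = 0x3;

static void *clear_mask(void *arg)
{
    unsigned mask = (unsigned)(uintptr_t)arg;

    pthread_mutex_lock(&bql);       /* 1: outer lock first */
    pthread_mutex_lock(&cpu_lock);  /* 2: inner lock second */
    interrupt_request &= ~mask;
    pthread_mutex_unlock(&cpu_lock);
    pthread_mutex_unlock(&bql);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, clear_mask, (void *)0x1);
    pthread_create(&b, NULL, clear_mask, (void *)0x2);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("interrupt_request = %#x\n", interrupt_request); /* prints 0 */
    return 0;
}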

Comments

Richard Henderson Oct. 26, 2018, 3:07 p.m. UTC | #1
On 10/25/18 3:46 PM, Emilio G. Cota wrote:
> Add a comment about how atomic_read works here. The comment refers to
> a "BQL-less CPU loop", which will materialize toward the end
> of this series.
> 
> Note that the modifications to cpu_reset_interrupt are there to
> avoid deadlock during the CPU lock transition; once that is complete,
> cpu_interrupt_request will be simple again.
> 
> Signed-off-by: Emilio G. Cota <cota@braap.org>
> ---
>  include/qom/cpu.h | 37 +++++++++++++++++++++++++++++++++++++
>  qom/cpu.c         | 27 +++++++++++++++++++++------
>  2 files changed, 58 insertions(+), 6 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~
Alex Bennée Oct. 31, 2018, 4:21 p.m. UTC | #2
Emilio G. Cota <cota@braap.org> writes:

> Add a comment about how atomic_read works here. The comment refers to
> a "BQL-less CPU loop", which will materialize toward the end
> of this series.
>
> Note that the modifications to cpu_reset_interrupt are there to
> avoid deadlock during the CPU lock transition; once that is complete,
> cpu_interrupt_request will be simple again.
>
<snip>
> --- a/qom/cpu.c
> +++ b/qom/cpu.c
> @@ -98,14 +98,29 @@ static void cpu_common_get_memory_mapping(CPUState *cpu,
>   * BQL here if we need to.  cpu_interrupt assumes it is held.*/
>  void cpu_reset_interrupt(CPUState *cpu, int mask)
>  {
> -    bool need_lock = !qemu_mutex_iothread_locked();
> +    bool has_bql = qemu_mutex_iothread_locked();
> +    bool has_cpu_lock = cpu_mutex_locked(cpu);
>
> -    if (need_lock) {
> -        qemu_mutex_lock_iothread();
> +    if (has_bql) {
> +        if (has_cpu_lock) {
> +            atomic_set(&cpu->interrupt_request, cpu->interrupt_request & ~mask);
> +        } else {
> +            cpu_mutex_lock(cpu);
> +            atomic_set(&cpu->interrupt_request, cpu->interrupt_request & ~mask);
> +            cpu_mutex_unlock(cpu);
> +        }
> +        return;
> +    }
> +
> +    if (has_cpu_lock) {
> +        cpu_mutex_unlock(cpu);
>      }
> -    cpu->interrupt_request &= ~mask;
> -    if (need_lock) {
> -        qemu_mutex_unlock_iothread();
> +    qemu_mutex_lock_iothread();
> +    cpu_mutex_lock(cpu);
> +    atomic_set(&cpu->interrupt_request, cpu->interrupt_request & ~mask);
> +    qemu_mutex_unlock_iothread();
> +    if (!has_cpu_lock) {
> +        cpu_mutex_unlock(cpu);
>      }
>  }

Yeah, I can see this is pretty ugly, but it's cleaned up by the end of the
series. If it's the same sequence as the previous patch, I wonder if it's
possible to hide the ugliness in a common helper while we transition?
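Something like this, perhaps (untested sketch; it assumes the cpu_mutex_*
and BQL helpers from earlier in the series, and isn't part of the patch):

/* Take both locks in the safe order (BQL first, then the CPU lock),
 * apply an update to interrupt_request, and restore the caller's
 * original locking state. During the transition, cpu_reset_interrupt
 * here and the equivalent path in the previous patch could both
 * funnel through it. */
static void cpu_interrupt_request_update(CPUState *cpu, uint32_t set,
                                         uint32_t clear)
{
    bool has_bql = qemu_mutex_iothread_locked();
    bool has_cpu_lock = cpu_mutex_locked(cpu);

    if (!has_bql) {
        if (has_cpu_lock) {
            cpu_mutex_unlock(cpu); /* avoid lock-order inversion */
        }
        qemu_mutex_lock_iothread();
        cpu_mutex_lock(cpu);
    } else if (!has_cpu_lock) {
        cpu_mutex_lock(cpu);
    }
    atomic_set(&cpu->interrupt_request,
               (cpu->interrupt_request | set) & ~clear);
    if (!has_bql) {
        qemu_mutex_unlock_iothread();
    }
    if (!has_cpu_lock) {
        cpu_mutex_unlock(cpu);
    }
}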

Otherwise:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

--
Alex Bennée

Patch

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index aeed63a705..a86690c7a5 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -511,6 +511,43 @@ static inline void cpu_halted_set(CPUState *cpu, uint32_t val)
     cpu_mutex_unlock(cpu);
 }
 
+/*
+ * When sending an interrupt, setters OR the appropriate bit and kick the
+ * destination vCPU. The latter can then read interrupt_request without
+ * acquiring the CPU lock, because once the kick-induced exit completes, it
+ * will read an up-to-date interrupt_request.
+ * Setters always acquire the lock, which guarantees that (1) concurrent
+ * updates from different threads won't result in data races, and (2) the
+ * BQL-less CPU loop will always see an up-to-date interrupt_request, since the
+ * loop holds the CPU lock.
+ */
+static inline uint32_t cpu_interrupt_request(CPUState *cpu)
+{
+    return atomic_read(&cpu->interrupt_request);
+}
+
+static inline void cpu_interrupt_request_or(CPUState *cpu, uint32_t mask)
+{
+    if (cpu_mutex_locked(cpu)) {
+        atomic_set(&cpu->interrupt_request, cpu->interrupt_request | mask);
+        return;
+    }
+    cpu_mutex_lock(cpu);
+    atomic_set(&cpu->interrupt_request, cpu->interrupt_request | mask);
+    cpu_mutex_unlock(cpu);
+}
+
+static inline void cpu_interrupt_request_set(CPUState *cpu, uint32_t val)
+{
+    if (cpu_mutex_locked(cpu)) {
+        atomic_set(&cpu->interrupt_request, val);
+        return;
+    }
+    cpu_mutex_lock(cpu);
+    atomic_set(&cpu->interrupt_request, val);
+    cpu_mutex_unlock(cpu);
+}
+
 static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
 {
     unsigned int i;
diff --git a/qom/cpu.c b/qom/cpu.c
index bb031a3a6a..ecdf8e7aac 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -98,14 +98,29 @@ static void cpu_common_get_memory_mapping(CPUState *cpu,
  * BQL here if we need to.  cpu_interrupt assumes it is held.*/
 void cpu_reset_interrupt(CPUState *cpu, int mask)
 {
-    bool need_lock = !qemu_mutex_iothread_locked();
+    bool has_bql = qemu_mutex_iothread_locked();
+    bool has_cpu_lock = cpu_mutex_locked(cpu);
 
-    if (need_lock) {
-        qemu_mutex_lock_iothread();
+    if (has_bql) {
+        if (has_cpu_lock) {
+            atomic_set(&cpu->interrupt_request, cpu->interrupt_request & ~mask);
+        } else {
+            cpu_mutex_lock(cpu);
+            atomic_set(&cpu->interrupt_request, cpu->interrupt_request & ~mask);
+            cpu_mutex_unlock(cpu);
+        }
+        return;
+    }
+
+    if (has_cpu_lock) {
+        cpu_mutex_unlock(cpu);
     }
-    cpu->interrupt_request &= ~mask;
-    if (need_lock) {
-        qemu_mutex_unlock_iothread();
+    qemu_mutex_lock_iothread();
+    cpu_mutex_lock(cpu);
+    atomic_set(&cpu->interrupt_request, cpu->interrupt_request & ~mask);
+    qemu_mutex_unlock_iothread();
+    if (!has_cpu_lock) {
+        cpu_mutex_unlock(cpu);
     }
 }
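For reference, the usage pattern the new comment describes -- set a bit
under the CPU lock, kick the vCPU, and let the vCPU loop read lock-free --
would look roughly like this (hypothetical callers, not part of the patch):

/* Sender side, e.g. an interrupt controller model: OR the bit in
 * (cpu_interrupt_request_or takes the CPU lock if the caller doesn't
 * already hold it), then kick the destination vCPU so it notices. */
static void raise_hard_irq(CPUState *cpu)
{
    cpu_interrupt_request_or(cpu, CPU_INTERRUPT_HARD);
    qemu_cpu_kick(cpu);
}

/* Receiver side, in the vCPU loop: after the kick-induced exit, a
 * plain atomic_read sees the new bit, so no lock is taken on this
 * hot path. */
static bool hard_irq_pending(CPUState *cpu)
{
    return cpu_interrupt_request(cpu) & CPU_INTERRUPT_HARD;
}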