diff mbox

[RFC,1/3] cpus: protect queued_work_* with work_mutex.

Message ID 1436541553-26576-2-git-send-email-fred.konrad@greensocs.com
State New
Headers show

Commit Message

fred.konrad@greensocs.com July 10, 2015, 3:19 p.m. UTC
From: KONRAD Frederic <fred.konrad@greensocs.com>

This protects queued_work_* used by async_run_on_cpu, run_on_cpu and
flush_queued_work with a new lock (work_mutex) to prevent multiple (concurrent)
access.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
---
 cpus.c            | 9 +++++++++
 include/qom/cpu.h | 3 +++
 qom/cpu.c         | 1 +
 3 files changed, 13 insertions(+)

Comments

Paolo Bonzini July 10, 2015, 3:22 p.m. UTC | #1
On 10/07/2015 17:19, fred.konrad@greensocs.com wrote:
> +    qemu_mutex_lock(&cpu->work_mutex);
>      while ((wi = cpu->queued_work_first)) {
>          cpu->queued_work_first = wi->next;
>          wi->func(wi->data);

Please unlock the mutex while calling the callback.

Paolo

> @@ -905,6 +912,8 @@ static void flush_queued_work(CPUState *cpu)
>          }
>      }
>      cpu->queued_work_last = NULL;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
>      qemu_cond_broadcast(&qemu_work_cond);
fred.konrad@greensocs.com July 10, 2015, 3:32 p.m. UTC | #2
On 10/07/2015 17:22, Paolo Bonzini wrote:
>
> On 10/07/2015 17:19, fred.konrad@greensocs.com wrote:
>> +    qemu_mutex_lock(&cpu->work_mutex);
>>       while ((wi = cpu->queued_work_first)) {
>>           cpu->queued_work_first = wi->next;
>>           wi->func(wi->data);
> Please unlock the mutex while calling the callback.
>
> Paolo
>
>> @@ -905,6 +912,8 @@ static void flush_queued_work(CPUState *cpu)
>>           }
>>       }
>>       cpu->queued_work_last = NULL;
>> +    qemu_mutex_unlock(&cpu->work_mutex);
>> +
>>       qemu_cond_broadcast(&qemu_work_cond);

I think something like this can work, because we never have two
flush_queued_work calls running at the same time on the same CPU?

/* Drain and run all work items queued on this CPU via run_on_cpu() /
 * async_run_on_cpu(), then wake any waiters blocked on qemu_work_cond.
 *
 * cpu->work_mutex protects the queued_work_first/queued_work_last list.
 * The mutex is deliberately dropped around wi->func(): the callback may
 * take a coarse lock (e.g. the big QEMU lock), and holding work_mutex
 * across it could cause an AB-BA deadlock.
 *
 * This relies on there being a single consumer: only one
 * flush_queued_work() runs at a time for a given CPU, so nobody else
 * pops items from the list while the mutex is released.
 */
static void flush_queued_work(CPUState *cpu)
{
     struct qemu_work_item *wi;

     /* Unlocked fast-path check; producers only append, so a racing
      * enqueue is picked up by the next flush. */
     if (cpu->queued_work_first == NULL) {
         return;
     }

     qemu_mutex_lock(&cpu->work_mutex);
     while ((wi = cpu->queued_work_first)) {
         /* Pop the item under the lock... */
         cpu->queued_work_first = wi->next;
         /* ...but run the callback without it (see deadlock note above). */
         qemu_mutex_unlock(&cpu->work_mutex);
         wi->func(wi->data);
         qemu_mutex_lock(&cpu->work_mutex);
         wi->done = true;
         /* async_run_on_cpu() items are heap-allocated and self-freeing;
          * run_on_cpu() items live on the caller's stack (free == false). */
         if (wi->free) {
             g_free(wi);
         }
     }
     cpu->queued_work_last = NULL;
     qemu_mutex_unlock(&cpu->work_mutex);

     /* Wake run_on_cpu() callers polling wi.done. */
     qemu_cond_broadcast(&qemu_work_cond);
}

Fred
Paolo Bonzini July 10, 2015, 3:34 p.m. UTC | #3
On 10/07/2015 17:32, Frederic Konrad wrote:
>>>
> 
> I think something like that can work because we don't have two
> flush_queued_work at the same time on the same CPU?

Yes, this works; there is only one consumer.

Holding locks within a callback can be very painful, especially if there
is a chance that the callback will take a very coarse lock such as big
QEMU lock.  It can cause AB-BA deadlocks.

Paolo

> static void flush_queued_work(CPUState *cpu)
> {
>     struct qemu_work_item *wi;
> 
>     if (cpu->queued_work_first == NULL) {
>         return;
>     }
> 
>     qemu_mutex_lock(&cpu->work_mutex);
>     while ((wi = cpu->queued_work_first)) {
>         cpu->queued_work_first = wi->next;
>         qemu_mutex_unlock(&cpu->work_mutex);
>         wi->func(wi->data);
>         qemu_mutex_lock(&cpu->work_mutex);
>         wi->done = true;
>         if (wi->free) {
>             g_free(wi);
>         }
>     }
>     cpu->queued_work_last = NULL;
>     qemu_mutex_unlock(&cpu->work_mutex);
> 
>     qemu_cond_broadcast(&qemu_work_cond);
> }
fred.konrad@greensocs.com July 10, 2015, 3:43 p.m. UTC | #4
On 10/07/2015 17:34, Paolo Bonzini wrote:
>
> On 10/07/2015 17:32, Frederic Konrad wrote:
>> I think something like that can work because we don't have two
>> flush_queued_work at the same time on the same CPU?
> Yes, this works; there is only one consumer.
>
> Holding locks within a callback can be very painful, especially if there
> is a chance that the callback will take a very coarse lock such as big
> QEMU lock.  It can cause AB-BA deadlocks.
>
> Paolo

Ok fine I'll change that.

Fred
>
>> static void flush_queued_work(CPUState *cpu)
>> {
>>      struct qemu_work_item *wi;
>>
>>      if (cpu->queued_work_first == NULL) {
>>          return;
>>      }
>>
>>      qemu_mutex_lock(&cpu->work_mutex);
>>      while ((wi = cpu->queued_work_first)) {
>>          cpu->queued_work_first = wi->next;
>>          qemu_mutex_unlock(&cpu->work_mutex);
>>          wi->func(wi->data);
>>          qemu_mutex_lock(&cpu->work_mutex);
>>          wi->done = true;
>>          if (wi->free) {
>>              g_free(wi);
>>          }
>>      }
>>      cpu->queued_work_last = NULL;
>>      qemu_mutex_unlock(&cpu->work_mutex);
>>
>>      qemu_cond_broadcast(&qemu_work_cond);
>> }
diff mbox

Patch

diff --git a/cpus.c b/cpus.c
index b00a423..3d95dbb 100644
--- a/cpus.c
+++ b/cpus.c
@@ -845,6 +845,8 @@  void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi.func = func;
     wi.data = data;
     wi.free = false;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -853,6 +855,7 @@  void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
     while (!wi.done) {
@@ -876,6 +879,8 @@  void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi->func = func;
     wi->data = data;
     wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = wi;
     } else {
@@ -884,6 +889,7 @@  void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = wi;
     wi->next = NULL;
     wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
 }
@@ -896,6 +902,7 @@  static void flush_queued_work(CPUState *cpu)
         return;
     }
 
+    qemu_mutex_lock(&cpu->work_mutex);
     while ((wi = cpu->queued_work_first)) {
         cpu->queued_work_first = wi->next;
         wi->func(wi->data);
@@ -905,6 +912,8 @@  static void flush_queued_work(CPUState *cpu)
         }
     }
     cpu->queued_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
     qemu_cond_broadcast(&qemu_work_cond);
 }
 
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 20aabc9..efa9624 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -242,6 +242,8 @@  struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
+ * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @queued_work_first: First asynchronous work pending.
  *
  * State of one CPU core or thread.
  */
@@ -262,6 +264,7 @@  struct CPUState {
     uint32_t host_tid;
     bool running;
     struct QemuCond *halt_cond;
+    QemuMutex work_mutex;
     struct qemu_work_item *queued_work_first, *queued_work_last;
     bool thread_kicked;
     bool created;
diff --git a/qom/cpu.c b/qom/cpu.c
index eb9cfec..4e12598 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -316,6 +316,7 @@  static void cpu_common_initfn(Object *obj)
     cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
+    qemu_mutex_init(&cpu->work_mutex);
 }
 
 static void cpu_common_finalize(Object *obj)