Message ID: 1439220437-23957-2-git-send-email-fred.konrad@greensocs.com
State: New
On 10/08/2015 17:26, fred.konrad@greensocs.com wrote:
>
> +    qemu_mutex_lock(&cpu->work_mutex);
>      while ((wi = cpu->queued_work_first)) {
>          cpu->queued_work_first = wi->next;
> +        qemu_mutex_unlock(&cpu->work_mutex);
>          wi->func(wi->data);
> +        qemu_mutex_lock(&cpu->work_mutex);
>          wi->done = true;

This should be atomic_mb_set

>          if (wi->free) {
>              g_free(wi);
>          }
>      }
>      cpu->queued_work_last = NULL;

... and I'm a bit afraid of leaving the state of the list inconsistent,
so I'd move this after the cpu->queued_work_first assignment.  Otherwise
the patch looks good, I'm queuing it for 2.5.

Paolo

> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
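[Applied to the hunk above, the two comments amount to roughly the following
shape for the loop.  This is only a sketch of one reading of the suggestion,
not the code that was eventually queued; atomic_mb_set() is QEMU's
store-with-memory-barrier helper from include/qemu/atomic.h, and the
queued_work_last clearing is guarded so it only happens when the list
actually becomes empty:

    qemu_mutex_lock(&cpu->work_mutex);
    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            /* list is now empty: clear the tail pointer *before* the
             * lock is dropped, so the list is never inconsistent */
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        wi->func(wi->data);
        qemu_mutex_lock(&cpu->work_mutex);
        atomic_mb_set(&wi->done, true);   /* store with barrier, see below */
        if (wi->free) {
            g_free(wi);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
]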
On 10/08/2015 17:59, Paolo Bonzini wrote:
>
> On 10/08/2015 17:26, fred.konrad@greensocs.com wrote:
>>
>> +    qemu_mutex_lock(&cpu->work_mutex);
>>      while ((wi = cpu->queued_work_first)) {
>>          cpu->queued_work_first = wi->next;
>> +        qemu_mutex_unlock(&cpu->work_mutex);
>>          wi->func(wi->data);
>> +        qemu_mutex_lock(&cpu->work_mutex);
>>          wi->done = true;
> This should be atomic_mb_set

Isn't that protected by the mutex? Or maybe it's used somewhere else?

>
>>          if (wi->free) {
>>              g_free(wi);
>>          }
>>      }
>>      cpu->queued_work_last = NULL;
> ... and I'm a bit afraid of leaving the state of the list inconsistent,
> so I'd move this after the cpu->queued_work_first assignment.  Otherwise
> the patch looks good, I'm queuing it for 2.5.
>
> Paolo
>
>> +    qemu_mutex_unlock(&cpu->work_mutex);
>> +
On 10/08/2015 18:04, Frederic Konrad wrote:
> On 10/08/2015 17:59, Paolo Bonzini wrote:
>>
>> On 10/08/2015 17:26, fred.konrad@greensocs.com wrote:
>>> +    qemu_mutex_lock(&cpu->work_mutex);
>>>      while ((wi = cpu->queued_work_first)) {
>>>          cpu->queued_work_first = wi->next;
>>> +        qemu_mutex_unlock(&cpu->work_mutex);
>>>          wi->func(wi->data);
>>> +        qemu_mutex_lock(&cpu->work_mutex);
>>>          wi->done = true;
>> This should be atomic_mb_set
>
> Isn't that protected by the mutex?

This use is not protected by the mutex:

@@ -853,6 +855,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
     while (!wi.done) {

Paolo

> Or maybe it's used somewhere else?
>>
>>>          if (wi->free) {
>>>              g_free(wi);
>>>          }
>>>      }
>>>      cpu->queued_work_last = NULL;
>> ... and I'm a bit afraid of leaving the state of the list inconsistent,
>> so I'd move this after the cpu->queued_work_first assignment.  Otherwise
>> the patch looks good, I'm queuing it for 2.5.
>>
>> Paolo
>>
>>> +    qemu_mutex_unlock(&cpu->work_mutex);
>>> +
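[The unlocked reader is that while (!wi.done) spin: once run_on_cpu() drops
work_mutex, the requesting thread polls wi.done with no lock held, so the
flusher's store needs a memory barrier for func()'s side effects to be
visible before done reads as true.  A standalone sketch of the same
handshake in plain C11 + pthreads, with illustrative names only (this is
not QEMU code; QEMU additionally sleeps on qemu_work_cond instead of
busy-spinning):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct work_item {
        void (*func)(void *);
        void *data;
        atomic_bool done;       /* read by the requester WITHOUT the mutex */
    };

    static struct work_item *pending;   /* stands in for cpu->queued_work_* */
    static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
        struct work_item *wi = NULL;

        while (!wi) {                   /* poll the queue under the lock */
            pthread_mutex_lock(&work_mutex);
            wi = pending;
            pending = NULL;
            pthread_mutex_unlock(&work_mutex);
        }
        wi->func(wi->data);
        /* Ordered store (seq_cst, hence release): everything func() wrote
         * is visible to the spinning requester before done reads true.
         * A plain "wi->done = true" would not guarantee that here, which
         * is exactly why the QEMU code wants atomic_mb_set(). */
        atomic_store(&wi->done, true);
        return arg;
    }

    static void hello(void *data)
    {
        printf("%s\n", (const char *)data);
    }

    int main(void)
    {
        struct work_item wi = { hello, "ran on worker thread", false };
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        pthread_mutex_lock(&work_mutex);
        pending = &wi;                  /* enqueue under the lock */
        pthread_mutex_unlock(&work_mutex);
        while (!atomic_load(&wi.done)) {
            /* unlocked spin, as in run_on_cpu() */
        }
        pthread_join(t, NULL);
        return 0;
    }
]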
diff --git a/cpus.c b/cpus.c
index b00a423..eabd4b1 100644
--- a/cpus.c
+++ b/cpus.c
@@ -845,6 +845,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi.func = func;
     wi.data = data;
     wi.free = false;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -853,6 +855,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
     while (!wi.done) {
@@ -876,6 +879,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi->func = func;
     wi->data = data;
     wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = wi;
     } else {
@@ -884,6 +889,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = wi;
     wi->next = NULL;
     wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
 }
@@ -896,15 +902,20 @@ static void flush_queued_work(CPUState *cpu)
         return;
     }
 
+    qemu_mutex_lock(&cpu->work_mutex);
     while ((wi = cpu->queued_work_first)) {
         cpu->queued_work_first = wi->next;
+        qemu_mutex_unlock(&cpu->work_mutex);
         wi->func(wi->data);
+        qemu_mutex_lock(&cpu->work_mutex);
         wi->done = true;
         if (wi->free) {
             g_free(wi);
         }
     }
     cpu->queued_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
     qemu_cond_broadcast(&qemu_work_cond);
 }

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 20aabc9..efa9624 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -242,6 +242,8 @@ struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
+ * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @queued_work_first: First asynchronous work pending.
  *
  * State of one CPU core or thread.
  */
@@ -262,6 +264,7 @@ struct CPUState {
     uint32_t host_tid;
     bool running;
     struct QemuCond *halt_cond;
+    QemuMutex work_mutex;
     struct qemu_work_item *queued_work_first, *queued_work_last;
     bool thread_kicked;
     bool created;

diff --git a/qom/cpu.c b/qom/cpu.c
index eb9cfec..4e12598 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -316,6 +316,7 @@ static void cpu_common_initfn(Object *obj)
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
+    qemu_mutex_init(&cpu->work_mutex);
 }
 
 static void cpu_common_finalize(Object *obj)
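[One design point in the flush_queued_work() hunk: work_mutex is
deliberately dropped around wi->func(wi->data), so a work function can
itself queue more work, or otherwise take the lock, without deadlocking.
A hypothetical caller of the API might look like the following; the
mydev_* names are made up for illustration, and run_on_cpu() blocks until
the item is done while async_run_on_cpu() returns immediately:

    /* Illustrative only: a device model asking a vCPU to reset itself. */
    static void mydev_do_reset(void *data)
    {
        CPUState *cpu = data;

        /* Runs in the target vCPU's thread with work_mutex released,
         * so queuing further work from here cannot deadlock. */
        cpu_reset(cpu);
    }

    static void mydev_trigger(CPUState *cpu)
    {
        run_on_cpu(cpu, mydev_do_reset, cpu);          /* synchronous */
        /* or: async_run_on_cpu(cpu, mydev_do_reset, cpu); */
    }
]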