Message ID | 1435330053-18733-14-git-send-email-fred.konrad@greensocs.com |
---|---|
State | New |
Headers | show |
On 26/06/2015 16:47, fred.konrad@greensocs.com wrote: > diff --git a/cpu-exec.c b/cpu-exec.c > index de256d6..d6442cd 100644 > --- a/cpu-exec.c > +++ b/cpu-exec.c Nice solution. However I still have a few questions that need clarification. > @@ -382,6 +382,11 @@ int cpu_exec(CPUArchState *env) > volatile bool have_tb_lock = false; > #endif > > + if (async_safe_work_pending()) { > + cpu->exit_request = 1; > + return 0; > + } Perhaps move this to cpu_can_run()? > if (cpu->halted) { > if (!cpu_has_work(cpu)) { > return EXCP_HALTED; > diff --git a/cpus.c b/cpus.c > index 5f13d73..aee445a 100644 > --- a/cpus.c > +++ b/cpus.c > @@ -75,7 +75,7 @@ bool cpu_is_stopped(CPUState *cpu) > > bool cpu_thread_is_idle(CPUState *cpu) > { > - if (cpu->stop || cpu->queued_work_first) { > + if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) { > return false; > } > if (cpu_is_stopped(cpu)) { > @@ -892,6 +892,69 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) > qemu_cpu_kick(cpu); > } > > +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data), > + void *data) > +{ Do you need a mutex to protect this data structure? I would use one even if not strictly necessary, to avoid introducing new BQL-protected structures. Also, can you add a count of how many such work items exist in the whole system, in order to speed up async_safe_work_pending? 
> + struct qemu_work_item *wi; > + > + wi = g_malloc0(sizeof(struct qemu_work_item)); > + wi->func = func; > + wi->data = data; > + wi->free = true; > + if (cpu->queued_safe_work_first == NULL) { > + cpu->queued_safe_work_first = wi; > + } else { > + cpu->queued_safe_work_last->next = wi; > + } > + cpu->queued_safe_work_last = wi; > + wi->next = NULL; > + wi->done = false; > + > + CPU_FOREACH(cpu) { > + qemu_cpu_kick_thread(cpu); > + } > +} > + > +static void flush_queued_safe_work(CPUState *cpu) > +{ > + struct qemu_work_item *wi; > + CPUState *other_cpu; > + > + if (cpu->queued_safe_work_first == NULL) { > + return; > + } > + > + CPU_FOREACH(other_cpu) { > + if (other_cpu->tcg_executing != 0) { This causes the thread to busy wait until everyone has exited, right? Not a big deal, but worth a comment. Paolo > + return; > + } > + } > + > + while ((wi = cpu->queued_safe_work_first)) { > + cpu->queued_safe_work_first = wi->next; > + wi->func(wi->data); > + wi->done = true; > + if (wi->free) { > + g_free(wi); > + } > + } > + cpu->queued_safe_work_last = NULL; > + qemu_cond_broadcast(&qemu_work_cond); > +} > + > +bool async_safe_work_pending(void) > +{ > + CPUState *cpu; > + > + CPU_FOREACH(cpu) { > + if (cpu->queued_safe_work_first) { > + return true; > + } > + } > + > + return false; > +} > +
On 26/06/2015 17:35, Paolo Bonzini wrote: > On 26/06/2015 16:47, fred.konrad@greensocs.com wrote: >> diff --git a/cpu-exec.c b/cpu-exec.c >> index de256d6..d6442cd 100644 >> --- a/cpu-exec.c >> +++ b/cpu-exec.c > Nice solution. However I still have a few questions that need > clarification. > >> @@ -382,6 +382,11 @@ int cpu_exec(CPUArchState *env) >> volatile bool have_tb_lock = false; >> #endif >> >> + if (async_safe_work_pending()) { >> + cpu->exit_request = 1; >> + return 0; >> + } > Perhaps move this to cpu_can_run()? Yes why not. > >> if (cpu->halted) { >> if (!cpu_has_work(cpu)) { >> return EXCP_HALTED; >> diff --git a/cpus.c b/cpus.c >> index 5f13d73..aee445a 100644 >> --- a/cpus.c >> +++ b/cpus.c >> @@ -75,7 +75,7 @@ bool cpu_is_stopped(CPUState *cpu) >> >> bool cpu_thread_is_idle(CPUState *cpu) >> { >> - if (cpu->stop || cpu->queued_work_first) { >> + if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) { >> return false; >> } >> if (cpu_is_stopped(cpu)) { >> @@ -892,6 +892,69 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) >> qemu_cpu_kick(cpu); >> } >> >> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data), >> + void *data) >> +{ > Do you need a mutex to protect this data structure? I would use one > even if not strictly necessary, to avoid introducing new BQL-protected > structures. For the moment it's called by tb_invalidate and tb_flush_safe; the second lacks a tb_lock/unlock, which should be added. I don't need another mutex except if this is used elsewhere? > Also, can you add a count of how many such work items exist in the whole > system, in order to speed up async_safe_work_pending? Yes that makes sense. 
>> + struct qemu_work_item *wi; >> + >> + wi = g_malloc0(sizeof(struct qemu_work_item)); >> + wi->func = func; >> + wi->data = data; >> + wi->free = true; >> + if (cpu->queued_safe_work_first == NULL) { >> + cpu->queued_safe_work_first = wi; >> + } else { >> + cpu->queued_safe_work_last->next = wi; >> + } >> + cpu->queued_safe_work_last = wi; >> + wi->next = NULL; >> + wi->done = false; >> + >> + CPU_FOREACH(cpu) { >> + qemu_cpu_kick_thread(cpu); >> + } >> +} >> + >> +static void flush_queued_safe_work(CPUState *cpu) >> +{ >> + struct qemu_work_item *wi; >> + CPUState *other_cpu; >> + >> + if (cpu->queued_safe_work_first == NULL) { >> + return; >> + } >> + >> + CPU_FOREACH(other_cpu) { >> + if (other_cpu->tcg_executing != 0) { > This causes the thread to busy wait until everyone has exited, right? > Not a big deal, but worth a comment. Right. Fred > Paolo > >> + return; >> + } >> + } >> + >> + while ((wi = cpu->queued_safe_work_first)) { >> + cpu->queued_safe_work_first = wi->next; >> + wi->func(wi->data); >> + wi->done = true; >> + if (wi->free) { >> + g_free(wi); >> + } >> + } >> + cpu->queued_safe_work_last = NULL; >> + qemu_cond_broadcast(&qemu_work_cond); >> +} >> + >> +bool async_safe_work_pending(void) >> +{ >> + CPUState *cpu; >> + >> + CPU_FOREACH(cpu) { >> + if (cpu->queued_safe_work_first) { >> + return true; >> + } >> + } >> + >> + return false; >> +} >> +
On 26/06/2015 18:09, Frederic Konrad wrote: >>> >>> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void >>> *data), >>> + void *data) >>> +{ >> Do you need a mutex to protect this data structure? I would use one >> even if not strictly necessary, to avoid introducing new BQL-protected >> structures. > > For the moment it's called by tb_invalidate and tb_flush_safe the second > lacks a > tb_lock/unlock which should be added. I don't need an other mutex expect > if this is > used elsewhere? In any case, the locking policy should be documented. At which point you realize that protecting a CPU's queued_safe_work_{first,next} fields with the tb_lock is a bit weird. :) I would add a mutex inside CPUState, and then later we could also use it for regular run_on_cpu/async_run_on_cpu. Paolo
On 26/06/2015 18:23, Paolo Bonzini wrote: > > On 26/06/2015 18:09, Frederic Konrad wrote: >>>> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void >>>> *data), >>>> + void *data) >>>> +{ >>> Do you need a mutex to protect this data structure? I would use one >>> even if not strictly necessary, to avoid introducing new BQL-protected >>> structures. >> For the moment it's called by tb_invalidate and tb_flush_safe the second >> lacks a >> tb_lock/unlock which should be added. I don't need an other mutex expect >> if this is >> used elsewhere? > In any case, the locking policy should be documented. > > At which point you realize that protecting a CPU's > queued_safe_work_{first,next} fields with the tb_lock is a bit weird. :) > I would add a mutex inside CPUState, and then later we could also use > it for regular run_on_cpu/async_run_on_cpu. > > Paolo Ok that makes sense :). Fred
diff --git a/cpu-exec.c b/cpu-exec.c index de256d6..d6442cd 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -382,6 +382,11 @@ int cpu_exec(CPUArchState *env) volatile bool have_tb_lock = false; #endif + if (async_safe_work_pending()) { + cpu->exit_request = 1; + return 0; + } + if (cpu->halted) { if (!cpu_has_work(cpu)) { return EXCP_HALTED; diff --git a/cpus.c b/cpus.c index 5f13d73..aee445a 100644 --- a/cpus.c +++ b/cpus.c @@ -75,7 +75,7 @@ bool cpu_is_stopped(CPUState *cpu) bool cpu_thread_is_idle(CPUState *cpu) { - if (cpu->stop || cpu->queued_work_first) { + if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) { return false; } if (cpu_is_stopped(cpu)) { @@ -892,6 +892,69 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) qemu_cpu_kick(cpu); } +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data), + void *data) +{ + struct qemu_work_item *wi; + + wi = g_malloc0(sizeof(struct qemu_work_item)); + wi->func = func; + wi->data = data; + wi->free = true; + if (cpu->queued_safe_work_first == NULL) { + cpu->queued_safe_work_first = wi; + } else { + cpu->queued_safe_work_last->next = wi; + } + cpu->queued_safe_work_last = wi; + wi->next = NULL; + wi->done = false; + + CPU_FOREACH(cpu) { + qemu_cpu_kick_thread(cpu); + } +} + +static void flush_queued_safe_work(CPUState *cpu) +{ + struct qemu_work_item *wi; + CPUState *other_cpu; + + if (cpu->queued_safe_work_first == NULL) { + return; + } + + CPU_FOREACH(other_cpu) { + if (other_cpu->tcg_executing != 0) { + return; + } + } + + while ((wi = cpu->queued_safe_work_first)) { + cpu->queued_safe_work_first = wi->next; + wi->func(wi->data); + wi->done = true; + if (wi->free) { + g_free(wi); + } + } + cpu->queued_safe_work_last = NULL; + qemu_cond_broadcast(&qemu_work_cond); +} + +bool async_safe_work_pending(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + if (cpu->queued_safe_work_first) { + return true; + } + } + + return false; +} + static void 
flush_queued_work(CPUState *cpu) { struct qemu_work_item *wi; @@ -919,6 +982,9 @@ static void qemu_wait_io_event_common(CPUState *cpu) cpu->stopped = true; qemu_cond_signal(&qemu_pause_cond); } + qemu_mutex_unlock_iothread(); + flush_queued_safe_work(cpu); + qemu_mutex_lock_iothread(); flush_queued_work(cpu); cpu->thread_kicked = false; } diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 1464afa..8f3fe56 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -260,6 +260,7 @@ struct CPUState { bool running; struct QemuCond *halt_cond; struct qemu_work_item *queued_work_first, *queued_work_last; + struct qemu_work_item *queued_safe_work_first, *queued_safe_work_last; bool thread_kicked; bool created; bool stop; @@ -548,6 +549,26 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); /** + * async_run_safe_work_on_cpu: + * @cpu: The vCPU to run on. + * @func: The function to be executed. + * @data: Data to pass to the function. + * + * Schedules the function @func for execution on the vCPU @cpu asynchronously + * when all the VCPUs are outside their loop. + */ +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data), + void *data); + +/** + * async_safe_work_pending: + * + * Check whether any safe work is pending on any VCPUs. + * Returns: @true if a safe work is pending, @false otherwise. + */ +bool async_safe_work_pending(void); + +/** * qemu_get_cpu: * @index: The CPUState@cpu_index value of the CPU to obtain. *