Message ID | 1437574681-18362-2-git-send-email-pbonzini@redhat.com |
---|---|
State | New |
Headers | show |
On 07/22/2015 10:18 PM, Paolo Bonzini wrote: > Otherwise, grace periods are detected too early! We always use qemu_thread_create() in qemu. So I think we can do it like this: wrapped_fn() { rcu_register_thread(); call thread_fn() here rcu_unregister_thread(); } So we will never forget to call rcu_register_thread() when creating a new thread. Thanks Wen Congyang > > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> > --- > cpus.c | 6 ++++++ > iothread.c | 5 +++++ > migration/migration.c | 4 ++++ > tests/test-rcu-list.c | 4 ++++ > util/rcu.c | 2 ++ > 5 files changed, 21 insertions(+) > > diff --git a/cpus.c b/cpus.c > index b00a423..a822ce3 100644 > --- a/cpus.c > +++ b/cpus.c > @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg) > CPUState *cpu = arg; > int r; > > + rcu_register_thread(); > + > qemu_mutex_lock_iothread(); > qemu_thread_get_self(cpu->thread); > cpu->thread_id = qemu_get_thread_id(); > @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg) > sigset_t waitset; > int r; > > + rcu_register_thread(); > + > qemu_mutex_lock_iothread(); > qemu_thread_get_self(cpu->thread); > cpu->thread_id = qemu_get_thread_id(); > @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) > { > CPUState *cpu = arg; > > + rcu_register_thread(); > + > qemu_mutex_lock_iothread(); > qemu_tcg_init_cpu_signals(); > qemu_thread_get_self(cpu->thread); > diff --git a/iothread.c b/iothread.c > index 6d2a33f..da6ce7b 100644 > --- a/iothread.c > +++ b/iothread.c > @@ -18,6 +18,7 @@ > #include "sysemu/iothread.h" > #include "qmp-commands.h" > #include "qemu/error-report.h" > +#include "qemu/rcu.h" > > typedef ObjectClass IOThreadClass; > > @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque) > IOThread *iothread = opaque; > bool blocking; > > + rcu_register_thread(); > + > qemu_mutex_lock(&iothread->init_done_lock); > iothread->thread_id = qemu_get_thread_id(); > qemu_cond_signal(&iothread->init_done_cond); > @@ -45,6 +48,8 @@ static void 
*iothread_run(void *opaque) > } > aio_context_release(iothread->ctx); > } > + > + rcu_unregister_thread(); > return NULL; > } > > diff --git a/migration/migration.c b/migration/migration.c > index 86ca099..fd4f99b 100644 > --- a/migration/migration.c > +++ b/migration/migration.c > @@ -22,6 +22,7 @@ > #include "block/block.h" > #include "qapi/qmp/qerror.h" > #include "qemu/sockets.h" > +#include "qemu/rcu.h" > #include "migration/block.h" > #include "qemu/thread.h" > #include "qmp-commands.h" > @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque) > int64_t start_time = initial_time; > bool old_vm_running = false; > > + rcu_register_thread(); > + > qemu_savevm_state_header(s->file); > qemu_savevm_state_begin(s->file, &s->params); > > @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque) > qemu_bh_schedule(s->cleanup_bh); > qemu_mutex_unlock_iothread(); > > + rcu_unregister_thread(); > return NULL; > } > > diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c > index 4c5f62e..daa8bf4 100644 > --- a/tests/test-rcu-list.c > +++ b/tests/test-rcu-list.c > @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg) > long long n_reads_local = 0; > struct list_element *el; > > + rcu_register_thread(); > + > *(struct rcu_reader_data **)arg = &rcu_reader; > atomic_inc(&nthreadsrunning); > while (goflag == GOFLAG_INIT) { > @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg) > qemu_mutex_lock(&counts_mutex); > n_reads += n_reads_local; > qemu_mutex_unlock(&counts_mutex); > + > + rcu_unregister_thread(); > return NULL; > } > > diff --git a/util/rcu.c b/util/rcu.c > index 7270151..cdcad67 100644 > --- a/util/rcu.c > +++ b/util/rcu.c > @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque) > { > struct rcu_head *node; > > + rcu_register_thread(); > + > for (;;) { > int tries = 0; > int n = atomic_read(&rcu_call_count); >
On 23/07/2015 04:56, Wen Congyang wrote: >> > Otherwise, grace periods are detected too early! > We always use qemu_thread_create() in qemu. So I think we can do it like this: > wrapped_fn() > { > rcu_register_thread(); > call thread_fn() here > rcu_unregister_thread(); > } > > So we will never forget to call rcu_register_thread() when creating a new thread. That's a good idea. Would you like to propose a patch for 2.5? Then we can also use it to run the thread_atexit notifiers and avoid the bug that Peter reported for OS X. Paolo
Am 22.07.2015 um 16:18 schrieb Paolo Bonzini: > Otherwise, grace periods are detected too early! I guess this or Wens proposal is still necessary for 2.4? > > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> > --- > cpus.c | 6 ++++++ > iothread.c | 5 +++++ > migration/migration.c | 4 ++++ > tests/test-rcu-list.c | 4 ++++ > util/rcu.c | 2 ++ > 5 files changed, 21 insertions(+) > > diff --git a/cpus.c b/cpus.c > index b00a423..a822ce3 100644 > --- a/cpus.c > +++ b/cpus.c > @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg) > CPUState *cpu = arg; > int r; > > + rcu_register_thread(); > + > qemu_mutex_lock_iothread(); > qemu_thread_get_self(cpu->thread); > cpu->thread_id = qemu_get_thread_id(); > @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg) > sigset_t waitset; > int r; > > + rcu_register_thread(); > + > qemu_mutex_lock_iothread(); > qemu_thread_get_self(cpu->thread); > cpu->thread_id = qemu_get_thread_id(); > @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) > { > CPUState *cpu = arg; > > + rcu_register_thread(); > + > qemu_mutex_lock_iothread(); > qemu_tcg_init_cpu_signals(); > qemu_thread_get_self(cpu->thread); > diff --git a/iothread.c b/iothread.c > index 6d2a33f..da6ce7b 100644 > --- a/iothread.c > +++ b/iothread.c > @@ -18,6 +18,7 @@ > #include "sysemu/iothread.h" > #include "qmp-commands.h" > #include "qemu/error-report.h" > +#include "qemu/rcu.h" > > typedef ObjectClass IOThreadClass; > > @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque) > IOThread *iothread = opaque; > bool blocking; > > + rcu_register_thread(); > + > qemu_mutex_lock(&iothread->init_done_lock); > iothread->thread_id = qemu_get_thread_id(); > qemu_cond_signal(&iothread->init_done_cond); > @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque) > } > aio_context_release(iothread->ctx); > } > + > + rcu_unregister_thread(); > return NULL; > } > > diff --git a/migration/migration.c b/migration/migration.c > index 
86ca099..fd4f99b 100644 > --- a/migration/migration.c > +++ b/migration/migration.c > @@ -22,6 +22,7 @@ > #include "block/block.h" > #include "qapi/qmp/qerror.h" > #include "qemu/sockets.h" > +#include "qemu/rcu.h" > #include "migration/block.h" > #include "qemu/thread.h" > #include "qmp-commands.h" > @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque) > int64_t start_time = initial_time; > bool old_vm_running = false; > > + rcu_register_thread(); > + > qemu_savevm_state_header(s->file); > qemu_savevm_state_begin(s->file, &s->params); > > @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque) > qemu_bh_schedule(s->cleanup_bh); > qemu_mutex_unlock_iothread(); > > + rcu_unregister_thread(); > return NULL; > } > > diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c > index 4c5f62e..daa8bf4 100644 > --- a/tests/test-rcu-list.c > +++ b/tests/test-rcu-list.c > @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg) > long long n_reads_local = 0; > struct list_element *el; > > + rcu_register_thread(); > + > *(struct rcu_reader_data **)arg = &rcu_reader; > atomic_inc(&nthreadsrunning); > while (goflag == GOFLAG_INIT) { > @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg) > qemu_mutex_lock(&counts_mutex); > n_reads += n_reads_local; > qemu_mutex_unlock(&counts_mutex); > + > + rcu_unregister_thread(); > return NULL; > } > > diff --git a/util/rcu.c b/util/rcu.c > index 7270151..cdcad67 100644 > --- a/util/rcu.c > +++ b/util/rcu.c > @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque) > { > struct rcu_head *node; > > + rcu_register_thread(); > + > for (;;) { > int tries = 0; > int n = atomic_read(&rcu_call_count); >
On 23/07/2015 12:30, Christian Borntraeger wrote: > Am 22.07.2015 um 16:18 schrieb Paolo Bonzini: >> Otherwise, grace periods are detected too early! > > I guess this or Wens proposal is still necessary for 2.4? Yes. I think this is better for 2.4. There are threads that do not need RCU, for example the thread-pool.c worker threads, so it may just be simpler to add an assertion in rcu_register_thread. I'm just a bit wary of doing little more than the bare minimum in 2.4, because of the OS X failure that I didn't quite understand. Paolo > >> >> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> >> --- >> cpus.c | 6 ++++++ >> iothread.c | 5 +++++ >> migration/migration.c | 4 ++++ >> tests/test-rcu-list.c | 4 ++++ >> util/rcu.c | 2 ++ >> 5 files changed, 21 insertions(+) >> >> diff --git a/cpus.c b/cpus.c >> index b00a423..a822ce3 100644 >> --- a/cpus.c >> +++ b/cpus.c >> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg) >> CPUState *cpu = arg; >> int r; >> >> + rcu_register_thread(); >> + >> qemu_mutex_lock_iothread(); >> qemu_thread_get_self(cpu->thread); >> cpu->thread_id = qemu_get_thread_id(); >> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg) >> sigset_t waitset; >> int r; >> >> + rcu_register_thread(); >> + >> qemu_mutex_lock_iothread(); >> qemu_thread_get_self(cpu->thread); >> cpu->thread_id = qemu_get_thread_id(); >> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) >> { >> CPUState *cpu = arg; >> >> + rcu_register_thread(); >> + >> qemu_mutex_lock_iothread(); >> qemu_tcg_init_cpu_signals(); >> qemu_thread_get_self(cpu->thread); >> diff --git a/iothread.c b/iothread.c >> index 6d2a33f..da6ce7b 100644 >> --- a/iothread.c >> +++ b/iothread.c >> @@ -18,6 +18,7 @@ >> #include "sysemu/iothread.h" >> #include "qmp-commands.h" >> #include "qemu/error-report.h" >> +#include "qemu/rcu.h" >> >> typedef ObjectClass IOThreadClass; >> >> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque) >> IOThread *iothread 
= opaque; >> bool blocking; >> >> + rcu_register_thread(); >> + >> qemu_mutex_lock(&iothread->init_done_lock); >> iothread->thread_id = qemu_get_thread_id(); >> qemu_cond_signal(&iothread->init_done_cond); >> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque) >> } >> aio_context_release(iothread->ctx); >> } >> + >> + rcu_unregister_thread(); >> return NULL; >> } >> >> diff --git a/migration/migration.c b/migration/migration.c >> index 86ca099..fd4f99b 100644 >> --- a/migration/migration.c >> +++ b/migration/migration.c >> @@ -22,6 +22,7 @@ >> #include "block/block.h" >> #include "qapi/qmp/qerror.h" >> #include "qemu/sockets.h" >> +#include "qemu/rcu.h" >> #include "migration/block.h" >> #include "qemu/thread.h" >> #include "qmp-commands.h" >> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque) >> int64_t start_time = initial_time; >> bool old_vm_running = false; >> >> + rcu_register_thread(); >> + >> qemu_savevm_state_header(s->file); >> qemu_savevm_state_begin(s->file, &s->params); >> >> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque) >> qemu_bh_schedule(s->cleanup_bh); >> qemu_mutex_unlock_iothread(); >> >> + rcu_unregister_thread(); >> return NULL; >> } >> >> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c >> index 4c5f62e..daa8bf4 100644 >> --- a/tests/test-rcu-list.c >> +++ b/tests/test-rcu-list.c >> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg) >> long long n_reads_local = 0; >> struct list_element *el; >> >> + rcu_register_thread(); >> + >> *(struct rcu_reader_data **)arg = &rcu_reader; >> atomic_inc(&nthreadsrunning); >> while (goflag == GOFLAG_INIT) { >> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg) >> qemu_mutex_lock(&counts_mutex); >> n_reads += n_reads_local; >> qemu_mutex_unlock(&counts_mutex); >> + >> + rcu_unregister_thread(); >> return NULL; >> } >> >> diff --git a/util/rcu.c b/util/rcu.c >> index 7270151..cdcad67 100644 >> --- a/util/rcu.c >> +++ b/util/rcu.c >> @@ -216,6 +216,8 
@@ static void *call_rcu_thread(void *opaque) >> { >> struct rcu_head *node; >> >> + rcu_register_thread(); >> + >> for (;;) { >> int tries = 0; >> int n = atomic_read(&rcu_call_count); >> > > >
On 07/23/2015 06:42 PM, Paolo Bonzini wrote: > > > On 23/07/2015 12:30, Christian Borntraeger wrote: >> Am 22.07.2015 um 16:18 schrieb Paolo Bonzini: >>> Otherwise, grace periods are detected too early! >> >> I guess this or Wens proposal is still necessary for 2.4? > > Yes. I think this is better for 2.4. There are threads that do not > need RCU, for example the thread-pool.c worker threads, so it may just If the thread doesn't use RCU, rcu_register_thread() is harmless, is it right? > be simpler to add an assertion in rcu_register_thread. I'm just a bit > wary of doing little more than the bare minimum in 2.4, because of the > OS X failure that I didn't quite understand. Which problem? I don't find it in the maillist. Do I miss something? Thanks Wen Congyang > > Paolo > >> >>> >>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> >>> --- >>> cpus.c | 6 ++++++ >>> iothread.c | 5 +++++ >>> migration/migration.c | 4 ++++ >>> tests/test-rcu-list.c | 4 ++++ >>> util/rcu.c | 2 ++ >>> 5 files changed, 21 insertions(+) >>> >>> diff --git a/cpus.c b/cpus.c >>> index b00a423..a822ce3 100644 >>> --- a/cpus.c >>> +++ b/cpus.c >>> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg) >>> CPUState *cpu = arg; >>> int r; >>> >>> + rcu_register_thread(); >>> + >>> qemu_mutex_lock_iothread(); >>> qemu_thread_get_self(cpu->thread); >>> cpu->thread_id = qemu_get_thread_id(); >>> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg) >>> sigset_t waitset; >>> int r; >>> >>> + rcu_register_thread(); >>> + >>> qemu_mutex_lock_iothread(); >>> qemu_thread_get_self(cpu->thread); >>> cpu->thread_id = qemu_get_thread_id(); >>> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) >>> { >>> CPUState *cpu = arg; >>> >>> + rcu_register_thread(); >>> + >>> qemu_mutex_lock_iothread(); >>> qemu_tcg_init_cpu_signals(); >>> qemu_thread_get_self(cpu->thread); >>> diff --git a/iothread.c b/iothread.c >>> index 6d2a33f..da6ce7b 100644 >>> --- a/iothread.c 
>>> +++ b/iothread.c >>> @@ -18,6 +18,7 @@ >>> #include "sysemu/iothread.h" >>> #include "qmp-commands.h" >>> #include "qemu/error-report.h" >>> +#include "qemu/rcu.h" >>> >>> typedef ObjectClass IOThreadClass; >>> >>> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque) >>> IOThread *iothread = opaque; >>> bool blocking; >>> >>> + rcu_register_thread(); >>> + >>> qemu_mutex_lock(&iothread->init_done_lock); >>> iothread->thread_id = qemu_get_thread_id(); >>> qemu_cond_signal(&iothread->init_done_cond); >>> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque) >>> } >>> aio_context_release(iothread->ctx); >>> } >>> + >>> + rcu_unregister_thread(); >>> return NULL; >>> } >>> >>> diff --git a/migration/migration.c b/migration/migration.c >>> index 86ca099..fd4f99b 100644 >>> --- a/migration/migration.c >>> +++ b/migration/migration.c >>> @@ -22,6 +22,7 @@ >>> #include "block/block.h" >>> #include "qapi/qmp/qerror.h" >>> #include "qemu/sockets.h" >>> +#include "qemu/rcu.h" >>> #include "migration/block.h" >>> #include "qemu/thread.h" >>> #include "qmp-commands.h" >>> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque) >>> int64_t start_time = initial_time; >>> bool old_vm_running = false; >>> >>> + rcu_register_thread(); >>> + >>> qemu_savevm_state_header(s->file); >>> qemu_savevm_state_begin(s->file, &s->params); >>> >>> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque) >>> qemu_bh_schedule(s->cleanup_bh); >>> qemu_mutex_unlock_iothread(); >>> >>> + rcu_unregister_thread(); >>> return NULL; >>> } >>> >>> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c >>> index 4c5f62e..daa8bf4 100644 >>> --- a/tests/test-rcu-list.c >>> +++ b/tests/test-rcu-list.c >>> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg) >>> long long n_reads_local = 0; >>> struct list_element *el; >>> >>> + rcu_register_thread(); >>> + >>> *(struct rcu_reader_data **)arg = &rcu_reader; >>> atomic_inc(&nthreadsrunning); >>> while (goflag == 
GOFLAG_INIT) { >>> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg) >>> qemu_mutex_lock(&counts_mutex); >>> n_reads += n_reads_local; >>> qemu_mutex_unlock(&counts_mutex); >>> + >>> + rcu_unregister_thread(); >>> return NULL; >>> } >>> >>> diff --git a/util/rcu.c b/util/rcu.c >>> index 7270151..cdcad67 100644 >>> --- a/util/rcu.c >>> +++ b/util/rcu.c >>> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque) >>> { >>> struct rcu_head *node; >>> >>> + rcu_register_thread(); >>> + >>> for (;;) { >>> int tries = 0; >>> int n = atomic_read(&rcu_call_count); >>> >> >> >> > > . >
On 23/07/2015 13:04, Wen Congyang wrote: > > Yes. I think this is better for 2.4. There are threads that do not > > need RCU, for example the thread-pool.c worker threads, so it may just > > If the thread doesn't use RCU, rcu_register_thread() is harmless, is it right? Every rcu_register_thread() makes synchronize_rcu() a little slower. >> > be simpler to add an assertion in rcu_register_thread. I'm just a bit >> > wary of doing little more than the bare minimum in 2.4, because of the >> > OS X failure that I didn't quite understand. > Which problem? I don't find it in the maillist. http://article.gmane.org/gmane.comp.emulators.qemu/351548 Paolo
At 2015/7/23 19:08, Paolo Bonzini Wrote: > > > On 23/07/2015 13:04, Wen Congyang wrote: >>> Yes. I think this is better for 2.4. There are threads that do not >>> need RCU, for example the thread-pool.c worker threads, so it may just >> >> If the thread doesn't use RCU, rcu_register_thread() is harmless, is it right? > > Every rcu_register_thread() makes synchronize_rcu() a little slower. Yes, but synchronize_rcu() is very slow... > >>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit >>>> wary of doing little more than the bare minimum in 2.4, because of the >>>> OS X failure that I didn't quite understand. >> Which problem? I don't find it in the maillist. > > http://article.gmane.org/gmane.comp.emulators.qemu/351548 Hmm, I guess rcu_reader is invalid when the pthread key is destroyed. Which is destroyed first: the pthread key or the __thread variable? I can't find any documentation that describes it. Thanks Wen Congyang > > Paolo > >
On 23/07/2015 14:59, Wen Congyang wrote: >>> >>> If the thread doesn't use RCU, rcu_register_thread() is harmless, is >>> it right? >> >> Every rcu_register_thread() makes synchronize_rcu() a little slower. > > Yes, but synchronize_rcu() is very slow... Hmm, worse: if rcu_register_thread() is called concurrently with synchronize_rcu(), it waits for the synchronize_rcu() to finish. :/ Paolo >> >>>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit >>>>> wary of doing little more than the bare minimum in 2.4, because of the >>>>> OS X failure that I didn't quite understand. >>> Which problem? I don't find it in the maillist. >> >> http://article.gmane.org/gmane.comp.emulators.qemu/351548 > > Hmm, I guess rcu_reader is invalid when pthread key is destroyed. > pthread key and __thread > variable, which is destroyed first? I don't find any document to > describe it.
On 07/24/2015 12:58 AM, Paolo Bonzini wrote: > > > On 23/07/2015 14:59, Wen Congyang wrote: >>>> >>>> If the thread doesn't use RCU, rcu_register_thread() is harmless, is >>>> it right? >>> >>> Every rcu_register_thread() makes synchronize_rcu() a little slower. >> >> Yes, but synchronize_rcu() is very slow... > > Hmm, worse, rcu_register_thread() if called together with > synchronize_rcu() it waits for the synchronize_rcu() to finish. :/ Yes, it is a problem. Thanks Wen Congyang > > Paolo > >>> >>>>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit >>>>>> wary of doing little more than the bare minimum in 2.4, because of the >>>>>> OS X failure that I didn't quite understand. >>>> Which problem? I don't find it in the maillist. >>> >>> http://article.gmane.org/gmane.comp.emulators.qemu/351548 >> >> Hmm, I guess rcu_reader is invalid when pthread key is destroyed. >> pthread key and __thread >> variable, which is destroyed first? I don't find any document to >> describe it. >
diff --git a/cpus.c b/cpus.c index b00a423..a822ce3 100644 --- a/cpus.c +++ b/cpus.c @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg) CPUState *cpu = arg; int r; + rcu_register_thread(); + qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg) sigset_t waitset; int r; + rcu_register_thread(); + qemu_mutex_lock_iothread(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) { CPUState *cpu = arg; + rcu_register_thread(); + qemu_mutex_lock_iothread(); qemu_tcg_init_cpu_signals(); qemu_thread_get_self(cpu->thread); diff --git a/iothread.c b/iothread.c index 6d2a33f..da6ce7b 100644 --- a/iothread.c +++ b/iothread.c @@ -18,6 +18,7 @@ #include "sysemu/iothread.h" #include "qmp-commands.h" #include "qemu/error-report.h" +#include "qemu/rcu.h" typedef ObjectClass IOThreadClass; @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque) IOThread *iothread = opaque; bool blocking; + rcu_register_thread(); + qemu_mutex_lock(&iothread->init_done_lock); iothread->thread_id = qemu_get_thread_id(); qemu_cond_signal(&iothread->init_done_cond); @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque) } aio_context_release(iothread->ctx); } + + rcu_unregister_thread(); return NULL; } diff --git a/migration/migration.c b/migration/migration.c index 86ca099..fd4f99b 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -22,6 +22,7 @@ #include "block/block.h" #include "qapi/qmp/qerror.h" #include "qemu/sockets.h" +#include "qemu/rcu.h" #include "migration/block.h" #include "qemu/thread.h" #include "qmp-commands.h" @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque) int64_t start_time = initial_time; bool old_vm_running = false; + rcu_register_thread(); + qemu_savevm_state_header(s->file); qemu_savevm_state_begin(s->file, &s->params); @@ 
-1016,6 +1019,7 @@ static void *migration_thread(void *opaque) qemu_bh_schedule(s->cleanup_bh); qemu_mutex_unlock_iothread(); + rcu_unregister_thread(); return NULL; } diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c index 4c5f62e..daa8bf4 100644 --- a/tests/test-rcu-list.c +++ b/tests/test-rcu-list.c @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg) long long n_reads_local = 0; struct list_element *el; + rcu_register_thread(); + *(struct rcu_reader_data **)arg = &rcu_reader; atomic_inc(&nthreadsrunning); while (goflag == GOFLAG_INIT) { @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg) qemu_mutex_lock(&counts_mutex); n_reads += n_reads_local; qemu_mutex_unlock(&counts_mutex); + + rcu_unregister_thread(); return NULL; } diff --git a/util/rcu.c b/util/rcu.c index 7270151..cdcad67 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque) { struct rcu_head *node; + rcu_register_thread(); + for (;;) { int tries = 0; int n = atomic_read(&rcu_call_count);
Otherwise, grace periods are detected too early! Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- cpus.c | 6 ++++++ iothread.c | 5 +++++ migration/migration.c | 4 ++++ tests/test-rcu-list.c | 4 ++++ util/rcu.c | 2 ++ 5 files changed, 21 insertions(+)