| Message ID | 20211004120208.7409-4-david@redhat.com |
|---|---|
| State | New |
| Series | util/oslib-posix: Support MADV_POPULATE_WRITE for os_mem_prealloc() |
* David Hildenbrand (david@redhat.com) wrote:
> Let's minimize the number of global variables to prepare for
> os_mem_prealloc() getting called concurrently and make the code a bit
> easier to read.
>
> The only consumer that really needs a global variable is the sigbus
> handler, which will require protection via a mutex in the future either way
> as we cannot concurrently mess with the SIGBUS handler.
>
> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  util/oslib-posix.c | 73 +++++++++++++++++++++++++++++-----------------
>  1 file changed, 47 insertions(+), 26 deletions(-)
>
> diff --git a/util/oslib-posix.c b/util/oslib-posix.c
> index cb89e07770..cf2ead54ad 100644
> --- a/util/oslib-posix.c
> +++ b/util/oslib-posix.c
> @@ -73,21 +73,30 @@
>  
>  #define MAX_MEM_PREALLOC_THREAD_COUNT 16
>  
> +struct MemsetThread;
> +
> +typedef struct MemsetContext {
> +    bool all_threads_created;
> +    bool any_thread_failed;
> +    struct MemsetThread *threads;
> +    int num_threads;
> +} MemsetContext;
> +
>  struct MemsetThread {
>      char *addr;
>      size_t numpages;
>      size_t hpagesize;
>      QemuThread pgthread;
>      sigjmp_buf env;
> +    MemsetContext *context;
>  };
>  typedef struct MemsetThread MemsetThread;
>  
> -static MemsetThread *memset_thread;
> -static int memset_num_threads;
> +/* used by sigbus_handler() */
> +static MemsetContext *sigbus_memset_context;
>  
>  static QemuMutex page_mutex;
>  static QemuCond page_cond;
> -static bool threads_created_flag;

Is there a reason you didn't lift page_mutex and page_cond into the
MemsetContext ?

(You don't need to change it for this series, just a thought;
another thought is that I think we have a few threadpools like this
with hooks to check they've all been created and to do something if one
fails).

Dave

>  int qemu_get_thread_id(void)
>  {
> @@ -438,10 +447,13 @@ const char *qemu_get_exec_dir(void)
>  static void sigbus_handler(int signal)
>  {
>      int i;
> -    if (memset_thread) {
> -        for (i = 0; i < memset_num_threads; i++) {
> -            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
> -                siglongjmp(memset_thread[i].env, 1);
> +
> +    if (sigbus_memset_context) {
> +        for (i = 0; i < sigbus_memset_context->num_threads; i++) {
> +            MemsetThread *thread = &sigbus_memset_context->threads[i];
> +
> +            if (qemu_thread_is_self(&thread->pgthread)) {
> +                siglongjmp(thread->env, 1);
>              }
>          }
>      }
> @@ -459,7 +471,7 @@ static void *do_touch_pages(void *arg)
>       * clearing until all threads have been created.
>       */
>      qemu_mutex_lock(&page_mutex);
> -    while(!threads_created_flag){
> +    while (!memset_args->context->all_threads_created) {
>          qemu_cond_wait(&page_cond, &page_mutex);
>      }
>      qemu_mutex_unlock(&page_mutex);
> @@ -502,7 +514,7 @@ static void *do_madv_populate_write_pages(void *arg)
>  
>      /* See do_touch_pages(). */
>      qemu_mutex_lock(&page_mutex);
> -    while (!threads_created_flag) {
> +    while (!memset_args->context->all_threads_created) {
>          qemu_cond_wait(&page_cond, &page_mutex);
>      }
>      qemu_mutex_unlock(&page_mutex);
> @@ -529,6 +541,9 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
>                             int smp_cpus, bool use_madv_populate_write)
>  {
>      static gsize initialized = 0;
> +    MemsetContext context = {
> +        .num_threads = get_memset_num_threads(smp_cpus),
> +    };
>      size_t numpages_per_thread, leftover;
>      void *(*touch_fn)(void *);
>      int ret = 0, i = 0;
> @@ -546,35 +561,41 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
>          touch_fn = do_touch_pages;
>      }
>  
> -    threads_created_flag = false;
> -    memset_num_threads = get_memset_num_threads(smp_cpus);
> -    memset_thread = g_new0(MemsetThread, memset_num_threads);
> -    numpages_per_thread = numpages / memset_num_threads;
> -    leftover = numpages % memset_num_threads;
> -    for (i = 0; i < memset_num_threads; i++) {
> -        memset_thread[i].addr = addr;
> -        memset_thread[i].numpages = numpages_per_thread + (i < leftover);
> -        memset_thread[i].hpagesize = hpagesize;
> -        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
> -                           touch_fn, &memset_thread[i],
> +    context.threads = g_new0(MemsetThread, context.num_threads);
> +    numpages_per_thread = numpages / context.num_threads;
> +    leftover = numpages % context.num_threads;
> +    for (i = 0; i < context.num_threads; i++) {
> +        context.threads[i].addr = addr;
> +        context.threads[i].numpages = numpages_per_thread + (i < leftover);
> +        context.threads[i].hpagesize = hpagesize;
> +        context.threads[i].context = &context;
> +        qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
> +                           touch_fn, &context.threads[i],
>                             QEMU_THREAD_JOINABLE);
> -        addr += memset_thread[i].numpages * hpagesize;
> +        addr += context.threads[i].numpages * hpagesize;
> +    }
> +
> +    if (!use_madv_populate_write) {
> +        sigbus_memset_context = &context;
>      }
>  
>      qemu_mutex_lock(&page_mutex);
> -    threads_created_flag = true;
> +    context.all_threads_created = true;
>      qemu_cond_broadcast(&page_cond);
>      qemu_mutex_unlock(&page_mutex);
>  
> -    for (i = 0; i < memset_num_threads; i++) {
> -        int tmp = (uintptr_t)qemu_thread_join(&memset_thread[i].pgthread);
> +    for (i = 0; i < context.num_threads; i++) {
> +        int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);
>  
>          if (tmp) {
>              ret = tmp;
>          }
>      }
> -    g_free(memset_thread);
> -    memset_thread = NULL;
> +
> +    if (!use_madv_populate_write) {
> +        sigbus_memset_context = NULL;
> +    }
> +    g_free(context.threads);
>  
>      return ret;
>  }
> -- 
> 2.31.1
>
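The recovery path discussed above hinges on sigsetjmp()/siglongjmp(): each preallocation thread arms a jump buffer before touching its pages, and the process-wide SIGBUS handler jumps back into whichever thread faulted, so the failure is reported as an error instead of killing QEMU. The following standalone sketch (not QEMU code; it uses a thread-local jump buffer instead of the per-thread lookup the patch keeps, and the function names are illustrative) shows the bare pattern:

```c
#include <setjmp.h>
#include <signal.h>
#include <stddef.h>

static __thread sigjmp_buf env;     /* one jump buffer per worker thread */

static void sigbus_handler(int sig)
{
    siglongjmp(env, 1);             /* resume after the sigsetjmp() below */
}

/* Returns 0 on success, -1 if SIGBUS was raised while touching the pages. */
static int touch_pages(volatile char *addr, size_t numpages, size_t pagesize)
{
    struct sigaction act = { .sa_handler = sigbus_handler };
    size_t i;

    sigaction(SIGBUS, &act, NULL);
    if (sigsetjmp(env, 1)) {
        return -1;                  /* arrived here via siglongjmp() */
    }
    for (i = 0; i < numpages; i++) {
        /* Read-modify-write faults the page in without changing its data. */
        addr[i * pagesize] += 0;
    }
    return 0;
}
```

In the patch itself the handler instead walks sigbus_memset_context->threads and uses qemu_thread_is_self() to find the faulting thread's env.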
On 07.10.21 12:05, Dr. David Alan Gilbert wrote:
> * David Hildenbrand (david@redhat.com) wrote:
>> Let's minimize the number of global variables to prepare for
>> os_mem_prealloc() getting called concurrently and make the code a bit
>> easier to read.
>>
>> The only consumer that really needs a global variable is the sigbus
>> handler, which will require protection via a mutex in the future either way
>> as we cannot concurrently mess with the SIGBUS handler.
>>
>> Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>  util/oslib-posix.c | 73 +++++++++++++++++++++++++++++-----------------
>>  1 file changed, 47 insertions(+), 26 deletions(-)
>>
>> diff --git a/util/oslib-posix.c b/util/oslib-posix.c
>> index cb89e07770..cf2ead54ad 100644
>> --- a/util/oslib-posix.c
>> +++ b/util/oslib-posix.c
>> @@ -73,21 +73,30 @@
>>  
>>  #define MAX_MEM_PREALLOC_THREAD_COUNT 16
>>  
>> +struct MemsetThread;
>> +
>> +typedef struct MemsetContext {
>> +    bool all_threads_created;
>> +    bool any_thread_failed;
>> +    struct MemsetThread *threads;
>> +    int num_threads;
>> +} MemsetContext;
>> +
>>  struct MemsetThread {
>>      char *addr;
>>      size_t numpages;
>>      size_t hpagesize;
>>      QemuThread pgthread;
>>      sigjmp_buf env;
>> +    MemsetContext *context;
>>  };
>>  typedef struct MemsetThread MemsetThread;
>>  
>> -static MemsetThread *memset_thread;
>> -static int memset_num_threads;
>> +/* used by sigbus_handler() */
>> +static MemsetContext *sigbus_memset_context;
>>  
>>  static QemuMutex page_mutex;
>>  static QemuCond page_cond;
>> -static bool threads_created_flag;
>
> Is there a reason you didn't lift page_mutex and page_cond into the
> MemsetContext ?

Mostly for simplicity and I didn't think that it will really make a
difference in practice.

In patch #6 I spelled out that this was done:

"Note that page_mutex and page_cond are shared between concurrent
invocations, which shouldn't be a problem."

>
> (You don't need to change it for this series, just a thought;
> another thought is that I think we have a few threadpools like this
> with hooks to check they've all been created and to do something if one
> fails).

I can look into that as an add-on series once I have some spare cycles.

Thanks!
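For reference, a rough sketch of what Dave's suggestion could look like, with the synchronization primitives lifted into the per-invocation context. This is not part of the series; field and helper names beyond the existing ones are illustrative only:

```c
/* MemsetContext owning its own synchronization state (sketch only). */
typedef struct MemsetContext {
    QemuMutex mutex;            /* would replace the global page_mutex */
    QemuCond cond;              /* would replace the global page_cond */
    bool all_threads_created;
    bool any_thread_failed;
    struct MemsetThread *threads;
    int num_threads;
} MemsetContext;

/* Hypothetical helper: each worker waits on its own context, not globals. */
static void wait_for_all_threads(MemsetThread *memset_args)
{
    MemsetContext *context = memset_args->context;

    qemu_mutex_lock(&context->mutex);
    while (!context->all_threads_created) {
        qemu_cond_wait(&context->cond, &context->mutex);
    }
    qemu_mutex_unlock(&context->mutex);
}
```

Each touch_all_pages() invocation would then have to init and destroy its own mutex/cond, which is presumably the extra churn the series avoids by keeping them global and shared.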
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index cb89e07770..cf2ead54ad 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -73,21 +73,30 @@
 
 #define MAX_MEM_PREALLOC_THREAD_COUNT 16
 
+struct MemsetThread;
+
+typedef struct MemsetContext {
+    bool all_threads_created;
+    bool any_thread_failed;
+    struct MemsetThread *threads;
+    int num_threads;
+} MemsetContext;
+
 struct MemsetThread {
     char *addr;
     size_t numpages;
     size_t hpagesize;
     QemuThread pgthread;
     sigjmp_buf env;
+    MemsetContext *context;
 };
 typedef struct MemsetThread MemsetThread;
 
-static MemsetThread *memset_thread;
-static int memset_num_threads;
+/* used by sigbus_handler() */
+static MemsetContext *sigbus_memset_context;
 
 static QemuMutex page_mutex;
 static QemuCond page_cond;
-static bool threads_created_flag;
 
 int qemu_get_thread_id(void)
 {
@@ -438,10 +447,13 @@ const char *qemu_get_exec_dir(void)
 static void sigbus_handler(int signal)
 {
     int i;
-    if (memset_thread) {
-        for (i = 0; i < memset_num_threads; i++) {
-            if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
-                siglongjmp(memset_thread[i].env, 1);
+
+    if (sigbus_memset_context) {
+        for (i = 0; i < sigbus_memset_context->num_threads; i++) {
+            MemsetThread *thread = &sigbus_memset_context->threads[i];
+
+            if (qemu_thread_is_self(&thread->pgthread)) {
+                siglongjmp(thread->env, 1);
             }
         }
     }
@@ -459,7 +471,7 @@ static void *do_touch_pages(void *arg)
      * clearing until all threads have been created.
      */
     qemu_mutex_lock(&page_mutex);
-    while(!threads_created_flag){
+    while (!memset_args->context->all_threads_created) {
         qemu_cond_wait(&page_cond, &page_mutex);
     }
     qemu_mutex_unlock(&page_mutex);
@@ -502,7 +514,7 @@ static void *do_madv_populate_write_pages(void *arg)
 
     /* See do_touch_pages(). */
     qemu_mutex_lock(&page_mutex);
-    while (!threads_created_flag) {
+    while (!memset_args->context->all_threads_created) {
         qemu_cond_wait(&page_cond, &page_mutex);
     }
     qemu_mutex_unlock(&page_mutex);
@@ -529,6 +541,9 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                            int smp_cpus, bool use_madv_populate_write)
 {
     static gsize initialized = 0;
+    MemsetContext context = {
+        .num_threads = get_memset_num_threads(smp_cpus),
+    };
     size_t numpages_per_thread, leftover;
     void *(*touch_fn)(void *);
     int ret = 0, i = 0;
@@ -546,35 +561,41 @@ static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
         touch_fn = do_touch_pages;
     }
 
-    threads_created_flag = false;
-    memset_num_threads = get_memset_num_threads(smp_cpus);
-    memset_thread = g_new0(MemsetThread, memset_num_threads);
-    numpages_per_thread = numpages / memset_num_threads;
-    leftover = numpages % memset_num_threads;
-    for (i = 0; i < memset_num_threads; i++) {
-        memset_thread[i].addr = addr;
-        memset_thread[i].numpages = numpages_per_thread + (i < leftover);
-        memset_thread[i].hpagesize = hpagesize;
-        qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
-                           touch_fn, &memset_thread[i],
+    context.threads = g_new0(MemsetThread, context.num_threads);
+    numpages_per_thread = numpages / context.num_threads;
+    leftover = numpages % context.num_threads;
+    for (i = 0; i < context.num_threads; i++) {
+        context.threads[i].addr = addr;
+        context.threads[i].numpages = numpages_per_thread + (i < leftover);
+        context.threads[i].hpagesize = hpagesize;
+        context.threads[i].context = &context;
+        qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+                           touch_fn, &context.threads[i],
                            QEMU_THREAD_JOINABLE);
-        addr += memset_thread[i].numpages * hpagesize;
+        addr += context.threads[i].numpages * hpagesize;
+    }
+
+    if (!use_madv_populate_write) {
+        sigbus_memset_context = &context;
    }
 
     qemu_mutex_lock(&page_mutex);
-    threads_created_flag = true;
+    context.all_threads_created = true;
     qemu_cond_broadcast(&page_cond);
     qemu_mutex_unlock(&page_mutex);
 
-    for (i = 0; i < memset_num_threads; i++) {
-        int tmp = (uintptr_t)qemu_thread_join(&memset_thread[i].pgthread);
+    for (i = 0; i < context.num_threads; i++) {
+        int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);
 
         if (tmp) {
             ret = tmp;
         }
     }
-    g_free(memset_thread);
-    memset_thread = NULL;
+
+    if (!use_madv_populate_write) {
+        sigbus_memset_context = NULL;
+    }
+    g_free(context.threads);
 
     return ret;
 }
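The ordering in touch_all_pages() is what makes the on-stack MemsetContext safe: workers are created first but block on page_cond until all_threads_created is set, which only happens after sigbus_memset_context has been published (in the !use_madv_populate_write case), and every worker is joined before the function returns. A minimal, self-contained sketch of that startup gate, using plain pthreads rather than QEMU's thread wrappers (names here are illustrative):

```c
#include <pthread.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int all_threads_created;

static void *worker(void *arg)
{
    pthread_mutex_lock(&mutex);
    while (!all_threads_created) {
        pthread_cond_wait(&cond, &mutex);   /* wait for the gate to open */
    }
    pthread_mutex_unlock(&mutex);

    /* ... touch this worker's slice of pages here ... */
    return NULL;
}

int main(void)
{
    pthread_t threads[4];

    for (int i = 0; i < 4; i++) {
        pthread_create(&threads[i], NULL, worker, NULL);
    }

    /* Shared state (e.g. the SIGBUS context) is fully set up at this point. */
    pthread_mutex_lock(&mutex);
    all_threads_created = 1;
    pthread_cond_broadcast(&cond);          /* open the gate for all workers */
    pthread_mutex_unlock(&mutex);

    for (int i = 0; i < 4; i++) {
        pthread_join(threads[i], NULL);
    }
    return 0;
}
```

qemu_cond_broadcast() in the patch plays the role of pthread_cond_broadcast() here.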