Message ID | 1369926481-20720-10-git-send-email-afaerber@suse.de |
---|---|
State | New |
Headers | show |
On Thu, 30 May 2013 17:08:01 +0200 Andreas Färber <afaerber@suse.de> wrote: > Previously it would search for the first CPU with paging enabled and > retrieve memory mappings from this and any following CPUs or return an > error if that fails. > > Instead walk all CPUs and if paging is enabled retrieve the memory > mappings. Fail only if retrieving memory mappings on a CPU with paging > enabled fails. > > This not only allows to more easily use one qemu_for_each_cpu() instead > of open-coding two CPU loops and drops find_first_paging_enabled_cpu() > but removes the implicit assumption of heterogeneity between CPUs n..N > in having paging enabled. > > Cc: Wen Congyang <wency@cn.fujitsu.com> > Signed-off-by: Andreas Färber <afaerber@suse.de> > --- > memory_mapping.c | 42 +++++++++++++++++++++++------------------- > 1 file changed, 23 insertions(+), 19 deletions(-) > > diff --git a/memory_mapping.c b/memory_mapping.c > index 481530a..55ac2b8 100644 > --- a/memory_mapping.c > +++ b/memory_mapping.c > @@ -165,35 +165,39 @@ void memory_mapping_list_init(MemoryMappingList *list) > QTAILQ_INIT(&list->head); > } > > -static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu) > +typedef struct GetGuestMemoryMappingData { > + MemoryMappingList *list; > + int ret; > +} GetGuestMemoryMappingData; > + > +static void qemu_get_one_guest_memory_mapping(CPUState *cpu, void *data) > { > - CPUArchState *env; > + GetGuestMemoryMappingData *s = data; > + int ret; > > - for (env = start_cpu; env != NULL; env = env->next_cpu) { > - if (cpu_paging_enabled(ENV_GET_CPU(env))) { > - return env; > - } > + if (!cpu_paging_enabled(cpu) || s->ret == -1) { > + return; > + } > + ret = cpu_get_memory_mapping(cpu, s->list); > + if (ret < 0) { > + s->ret = -1; > + } else { > + s->ret = 0; > } Does cpu_get_memory_mapping() ever return a positive value or a negative value != -1 ? 
If it doesn't, I'd do: s->ret = cpu_get_memory_mapping(); assert(s->ret == 0 || s->ret == -1); > - > - return NULL; > } > > int qemu_get_guest_memory_mapping(MemoryMappingList *list) > { > - CPUArchState *env, *first_paging_enabled_cpu; > + GetGuestMemoryMappingData s = { > + .list = list, > + .ret = -2, > + }; > RAMBlock *block; > ram_addr_t offset, length; > - int ret; > > - first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); > - if (first_paging_enabled_cpu) { > - for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) { > - ret = cpu_get_memory_mapping(ENV_GET_CPU(env), list); > - if (ret < 0) { > - return -1; > - } > - } > - return 0; > + qemu_for_each_cpu(qemu_get_one_guest_memory_mapping, &s); > + if (s.ret != -2) { > + return s.ret; > } I see we ignore error in dump_init(). Doesn't matter today because x86_cpu_get_memory_mapping() never fails. But as you're doing this cleanup, shouldn't you add proper error handling? > > /*
Am 31.05.2013 16:14, schrieb Luiz Capitulino: > On Thu, 30 May 2013 17:08:01 +0200 > Andreas Färber <afaerber@suse.de> wrote: > >> Previously it would search for the first CPU with paging enabled and >> retrieve memory mappings from this and any following CPUs or return an >> error if that fails. >> >> Instead walk all CPUs and if paging is enabled retrieve the memory >> mappings. Fail only if retrieving memory mappings on a CPU with paging >> enabled fails. >> >> This not only allows to more easily use one qemu_for_each_cpu() instead >> of open-coding two CPU loops and drops find_first_paging_enabled_cpu() >> but removes the implicit assumption of heterogeneity between CPUs n..N >> in having paging enabled. >> >> Cc: Wen Congyang <wency@cn.fujitsu.com> >> Signed-off-by: Andreas Färber <afaerber@suse.de> >> --- >> memory_mapping.c | 42 +++++++++++++++++++++++------------------- >> 1 file changed, 23 insertions(+), 19 deletions(-) >> >> diff --git a/memory_mapping.c b/memory_mapping.c >> index 481530a..55ac2b8 100644 >> --- a/memory_mapping.c >> +++ b/memory_mapping.c >> @@ -165,35 +165,39 @@ void memory_mapping_list_init(MemoryMappingList *list) >> QTAILQ_INIT(&list->head); >> } >> >> -static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu) >> +typedef struct GetGuestMemoryMappingData { >> + MemoryMappingList *list; >> + int ret; >> +} GetGuestMemoryMappingData; >> + >> +static void qemu_get_one_guest_memory_mapping(CPUState *cpu, void *data) >> { >> - CPUArchState *env; >> + GetGuestMemoryMappingData *s = data; >> + int ret; >> >> - for (env = start_cpu; env != NULL; env = env->next_cpu) { >> - if (cpu_paging_enabled(ENV_GET_CPU(env))) { >> - return env; >> - } >> + if (!cpu_paging_enabled(cpu) || s->ret == -1) { >> + return; >> + } >> + ret = cpu_get_memory_mapping(cpu, s->list); >> + if (ret < 0) { >> + s->ret = -1; >> + } else { >> + s->ret = 0; >> } > > Does cpu_get_memory_mapping() ever return a positive value or a negative > value != -1 ? 
> > It it doesn't, I'd do: > > s->ret = cpu_get_memory_mapping(); > assert(s->ret == 0 || s->ret == -1); This no longer applies after returning an Error* from cpu_get_memory_mapping() instead. :) > >> - >> - return NULL; >> } >> >> int qemu_get_guest_memory_mapping(MemoryMappingList *list) >> { >> - CPUArchState *env, *first_paging_enabled_cpu; >> + GetGuestMemoryMappingData s = { >> + .list = list, >> + .ret = -2, >> + }; >> RAMBlock *block; >> ram_addr_t offset, length; >> - int ret; >> >> - first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); >> - if (first_paging_enabled_cpu) { >> - for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) { >> - ret = cpu_get_memory_mapping(ENV_GET_CPU(env), list); >> - if (ret < 0) { >> - return -1; >> - } >> - } >> - return 0; >> + qemu_for_each_cpu(qemu_get_one_guest_memory_mapping, &s); >> + if (s.ret != -2) { >> + return s.ret; >> } > > I see we ignore error in dump_init(). Doesn't matter today because > x86_cpu_get_memory_mapping() never fails. But as you're doing this cleanup, > shouldn't you add proper error handling? This patch is not needed for arm/s390x, so we can easily postpone it. I included it here for early feedback since my series building on this still needs to be tested and tweaked for bsd-user. I figure passing through Error** would be the logical follow-up here? Yet s.ret is being reused to check if any CPU provided any mappings at all - we could instead do two loops, one for checking if any CPU has paging enabled and then one passing out any Error* and propagating that. Thanks, Andreas
diff --git a/memory_mapping.c b/memory_mapping.c index 481530a..55ac2b8 100644 --- a/memory_mapping.c +++ b/memory_mapping.c @@ -165,35 +165,39 @@ void memory_mapping_list_init(MemoryMappingList *list) QTAILQ_INIT(&list->head); } -static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu) +typedef struct GetGuestMemoryMappingData { + MemoryMappingList *list; + int ret; +} GetGuestMemoryMappingData; + +static void qemu_get_one_guest_memory_mapping(CPUState *cpu, void *data) { - CPUArchState *env; + GetGuestMemoryMappingData *s = data; + int ret; - for (env = start_cpu; env != NULL; env = env->next_cpu) { - if (cpu_paging_enabled(ENV_GET_CPU(env))) { - return env; - } + if (!cpu_paging_enabled(cpu) || s->ret == -1) { + return; + } + ret = cpu_get_memory_mapping(cpu, s->list); + if (ret < 0) { + s->ret = -1; + } else { + s->ret = 0; } - - return NULL; } int qemu_get_guest_memory_mapping(MemoryMappingList *list) { - CPUArchState *env, *first_paging_enabled_cpu; + GetGuestMemoryMappingData s = { + .list = list, + .ret = -2, + }; RAMBlock *block; ram_addr_t offset, length; - int ret; - first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu); - if (first_paging_enabled_cpu) { - for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) { - ret = cpu_get_memory_mapping(ENV_GET_CPU(env), list); - if (ret < 0) { - return -1; - } - } - return 0; + qemu_for_each_cpu(qemu_get_one_guest_memory_mapping, &s); + if (s.ret != -2) { + return s.ret; } /*
Previously it would search for the first CPU with paging enabled and retrieve memory mappings from this and any following CPUs or return an error if that fails. Instead walk all CPUs and if paging is enabled retrieve the memory mappings. Fail only if retrieving memory mappings on a CPU with paging enabled fails. This not only allows to more easily use one qemu_for_each_cpu() instead of open-coding two CPU loops and drops find_first_paging_enabled_cpu() but removes the implicit assumption of heterogeneity between CPUs n..N in having paging enabled. Cc: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Andreas Färber <afaerber@suse.de> --- memory_mapping.c | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-)