Message ID | 20190724084638.24982-3-npiggin@gmail.com (mailing list archive) |
---|---|
State | Accepted |
Commit | 10c4bd7cd28e77aeb8cfa65b23cb3c632ede2a49 |
Headers | show |
Series | [1/5] powerpc/64s/radix: Fix memory hotplug section page table creation | expand |
Context | Check | Description |
---|---|---|
snowpatch_ozlabs/apply_patch | success | Successfully applied on branch next (f3365d1a959d5c6527efe3d38276acc9b58e3f3f) |
snowpatch_ozlabs/checkpatch | warning | total: 0 errors, 0 warnings, 2 checks, 61 lines checked |
Nicholas Piggin <npiggin@gmail.com> writes: > The alloc_pages_node return value should be tested for failure > before being passed to page_address. > Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Does this need a Fixes: tag? It fixes a real crash, unlike the other patch in this series? > Cc: Madhavan Srinivasan <maddy@linux.vnet.ibm.com> > Tested-by: Anju T Sudhakar <anju@linux.vnet.ibm.com> > Signed-off-by: Nicholas Piggin <npiggin@gmail.com> > --- > arch/powerpc/perf/imc-pmu.c | 29 ++++++++++++++++++----------- > 1 file changed, 18 insertions(+), 11 deletions(-) > > diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c > index dea243185ea4..cb50a9e1fd2d 100644 > --- a/arch/powerpc/perf/imc-pmu.c > +++ b/arch/powerpc/perf/imc-pmu.c > @@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size) > { > int nid, rc = 0, core_id = (cpu / threads_per_core); > struct imc_mem_info *mem_info; > + struct page *page; > > /* > * alloc_pages_node() will allocate memory for core in the > @@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size) > mem_info->id = core_id; > > /* We need only vbase for core counters */ > - mem_info->vbase = page_address(alloc_pages_node(nid, > - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | > - __GFP_NOWARN, get_order(size))); > - if (!mem_info->vbase) > + page = alloc_pages_node(nid, > + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | > + __GFP_NOWARN, get_order(size)); > + if (!page) > return -ENOMEM; > + mem_info->vbase = page_address(page); > > /* Init the mutex */ > core_imc_refc[core_id].id = core_id; > @@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size) > int nid = cpu_to_node(cpu_id); > > if (!local_mem) { > + struct page *page; > /* > * This case could happen only once at start, since we dont > * free the memory in cpu offline path.
> */ > - local_mem = page_address(alloc_pages_node(nid, > + page = alloc_pages_node(nid, > GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | > - __GFP_NOWARN, get_order(size))); > - if (!local_mem) > + __GFP_NOWARN, get_order(size)); > + if (!page) > return -ENOMEM; > + local_mem = page_address(page); > > per_cpu(thread_imc_mem, cpu_id) = local_mem; > } > @@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size) > int core_id = (cpu_id / threads_per_core); > > if (!local_mem) { > - local_mem = page_address(alloc_pages_node(phys_id, > - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | > - __GFP_NOWARN, get_order(size))); > - if (!local_mem) > + struct page *page; > + > + page = alloc_pages_node(phys_id, > + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | > + __GFP_NOWARN, get_order(size)); > + if (!page) > return -ENOMEM; > + local_mem = page_address(page); > per_cpu(trace_imc_mem, cpu_id) = local_mem; > > /* Initialise the counters for trace mode */ > -- > 2.22.0
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index dea243185ea4..cb50a9e1fd2d 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size) { int nid, rc = 0, core_id = (cpu / threads_per_core); struct imc_mem_info *mem_info; + struct page *page; /* * alloc_pages_node() will allocate memory for core in the @@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size) mem_info->id = core_id; /* We need only vbase for core counters */ - mem_info->vbase = page_address(alloc_pages_node(nid, - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | - __GFP_NOWARN, get_order(size))); - if (!mem_info->vbase) + page = alloc_pages_node(nid, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | + __GFP_NOWARN, get_order(size)); + if (!page) return -ENOMEM; + mem_info->vbase = page_address(page); /* Init the mutex */ core_imc_refc[core_id].id = core_id; @@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size) int nid = cpu_to_node(cpu_id); if (!local_mem) { + struct page *page; /* * This case could happen only once at start, since we dont * free the memory in cpu offline path.
*/ - local_mem = page_address(alloc_pages_node(nid, + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | - __GFP_NOWARN, get_order(size))); - if (!local_mem) + __GFP_NOWARN, get_order(size)); + if (!page) return -ENOMEM; + local_mem = page_address(page); per_cpu(thread_imc_mem, cpu_id) = local_mem; } @@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size) int core_id = (cpu_id / threads_per_core); if (!local_mem) { - local_mem = page_address(alloc_pages_node(phys_id, - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | - __GFP_NOWARN, get_order(size))); - if (!local_mem) + struct page *page; + + page = alloc_pages_node(phys_id, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | + __GFP_NOWARN, get_order(size)); + if (!page) return -ENOMEM; + local_mem = page_address(page); per_cpu(trace_imc_mem, cpu_id) = local_mem; /* Initialise the counters for trace mode */