Message ID | 20190814082452.28013-4-santosh@fossix.org (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | Add bad pmem bad blocks to bad range | expand |
Context | Check | Description |
---|---|---|
snowpatch_ozlabs/apply_patch | warning | Failed to apply on branch next (da206bd46848568e1aaf35f00e2d78bf9bc94f95) |
snowpatch_ozlabs/apply_patch | fail | Failed to apply to any branch |
On Wed, Aug 14, 2019 at 6:25 PM Santosh Sivaraj <santosh@fossix.org> wrote: > > Subscribe to the MCE notification and add the physical address which > generated a memory error to nvdimm bad range. > > Signed-off-by: Santosh Sivaraj <santosh@fossix.org> > --- > arch/powerpc/platforms/pseries/papr_scm.c | 65 +++++++++++++++++++++++ > 1 file changed, 65 insertions(+) > > diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c > index a5ac371a3f06..4d25c98a9835 100644 > --- a/arch/powerpc/platforms/pseries/papr_scm.c > +++ b/arch/powerpc/platforms/pseries/papr_scm.c > @@ -12,6 +12,8 @@ > #include <linux/libnvdimm.h> > #include <linux/platform_device.h> > #include <linux/delay.h> > +#include <linux/nd.h> > +#include <asm/mce.h> > > #include <asm/plpar_wrappers.h> > > @@ -39,8 +41,12 @@ struct papr_scm_priv { > struct resource res; > struct nd_region *region; > struct nd_interleave_set nd_set; > + struct list_head list; list is not a meaningful name. call it something more descriptive. > }; > > +LIST_HEAD(papr_nd_regions); > +DEFINE_MUTEX(papr_ndr_lock); Should this be a mutex or a spinlock? I don't know what context the mce notifier is called from, but if it's not sleepable then a mutex will cause problems. Did you test this with lockdep enabled? > + > static int drc_pmem_bind(struct papr_scm_priv *p) > { > unsigned long ret[PLPAR_HCALL_BUFSIZE]; > @@ -364,6 +370,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) > dev_info(dev, "Region registered with target node %d and online node %d", > target_nid, online_nid); > > + mutex_lock(&papr_ndr_lock); > + list_add_tail(&p->list, &papr_nd_regions); > + mutex_unlock(&papr_ndr_lock); > + Where's the matching remove when we unbind the driver? 
> return 0; > > err: nvdimm_bus_unregister(p->bus); > @@ -371,6 +381,60 @@ err: nvdimm_bus_unregister(p->bus); > return -ENXIO; > } > > +static int handle_mce_ue(struct notifier_block *nb, unsigned long val, > + void *data) > +{ > + struct machine_check_event *evt = data; > + struct papr_scm_priv *p; > + u64 phys_addr; > + > + if (evt->error_type != MCE_ERROR_TYPE_UE) > + return NOTIFY_DONE; > + > + if (list_empty(&papr_nd_regions)) > + return NOTIFY_DONE; > + > + phys_addr = evt->u.ue_error.physical_address + > + (evt->u.ue_error.effective_address & ~PAGE_MASK); Wait what? Why is physical_address page aligned, but effective_address not? Not a problem with this patch, but still, what the hell? > + if (!evt->u.ue_error.physical_address_provided || > + !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT))) > + return NOTIFY_DONE; > + > + mutex_lock(&papr_ndr_lock); > + list_for_each_entry(p, &papr_nd_regions, list) { > + struct resource res = p->res; > + u64 aligned_addr; > + > + if (res.start > phys_addr) > + continue; > + > + if (res.end < phys_addr) > + continue; surely there's a helper for this > + > + aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES); > + pr_debug("Add memory range (0x%llx -- 0x%llx) as bad range\n", > + aligned_addr, aligned_addr + L1_CACHE_BYTES); > + > + if (nvdimm_bus_add_badrange(p->bus, > + aligned_addr, L1_CACHE_BYTES)) > + pr_warn("Failed to add bad range (0x%llx -- 0x%llx)\n", > + aligned_addr, aligned_addr + L1_CACHE_BYTES); > + > + nvdimm_region_notify(p->region, > + NVDIMM_REVALIDATE_POISON); > + > + break; nit: you can avoid stacking indentation levels by breaking out of the loop as soon as you've found the region you're looking for. 
> + } > + mutex_unlock(&papr_ndr_lock); > + > + return NOTIFY_OK; > +} > + > +static struct notifier_block mce_ue_nb = { > + .notifier_call = handle_mce_ue > +}; > + > static int papr_scm_probe(struct platform_device *pdev) > { > struct device_node *dn = pdev->dev.of_node; > @@ -456,6 +520,7 @@ static int papr_scm_probe(struct platform_device *pdev) > goto err2; > > platform_set_drvdata(pdev, p); > + mce_register_notifier(&mce_ue_nb); Either get rid of the global region list and have a notifier block in each device's driver private data, or keep the global list and register the notifier in module_init(). Re-registering the notifier each time a separate device is probed seems very sketchy. > return 0; > > -- > 2.21.0 >
"Oliver O'Halloran" <oohall@gmail.com> writes: > On Wed, Aug 14, 2019 at 6:25 PM Santosh Sivaraj <santosh@fossix.org> wrote: >> >> Subscribe to the MCE notification and add the physical address which >> generated a memory error to nvdimm bad range. >> >> Signed-off-by: Santosh Sivaraj <santosh@fossix.org> >> --- >> arch/powerpc/platforms/pseries/papr_scm.c | 65 +++++++++++++++++++++++ >> 1 file changed, 65 insertions(+) >> >> diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c >> index a5ac371a3f06..4d25c98a9835 100644 >> --- a/arch/powerpc/platforms/pseries/papr_scm.c >> +++ b/arch/powerpc/platforms/pseries/papr_scm.c >> @@ -12,6 +12,8 @@ >> #include <linux/libnvdimm.h> >> #include <linux/platform_device.h> >> #include <linux/delay.h> >> +#include <linux/nd.h> >> +#include <asm/mce.h> >> >> #include <asm/plpar_wrappers.h> >> >> @@ -39,8 +41,12 @@ struct papr_scm_priv { >> struct resource res; >> struct nd_region *region; >> struct nd_interleave_set nd_set; >> + struct list_head list; > > list is not a meaningful name. call it something more descriptive. > >> }; >> >> +LIST_HEAD(papr_nd_regions); >> +DEFINE_MUTEX(papr_ndr_lock); > > Should this be a mutex or a spinlock? I don't know what context the > mce notifier is called from, but if it's not sleepable then a mutex > will cause problems. Did you test this with lockdep enabled? This would be a mutex, we are called from a blocking notifier. > >> + >> static int drc_pmem_bind(struct papr_scm_priv *p) >> { >> unsigned long ret[PLPAR_HCALL_BUFSIZE]; >> @@ -364,6 +370,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) >> dev_info(dev, "Region registered with target node %d and online node %d", >> target_nid, online_nid); >> >> + mutex_lock(&papr_ndr_lock); >> + list_add_tail(&p->list, &papr_nd_regions); >> + mutex_unlock(&papr_ndr_lock); >> + > > Where's the matching remove when we unbind the driver? Missed it completely. Will fix it. 
> >> return 0; >> >> err: nvdimm_bus_unregister(p->bus); >> @@ -371,6 +381,60 @@ err: nvdimm_bus_unregister(p->bus); >> return -ENXIO; >> } >> >> +static int handle_mce_ue(struct notifier_block *nb, unsigned long val, >> + void *data) >> +{ >> + struct machine_check_event *evt = data; >> + struct papr_scm_priv *p; >> + u64 phys_addr; >> + >> + if (evt->error_type != MCE_ERROR_TYPE_UE) >> + return NOTIFY_DONE; >> + >> + if (list_empty(&papr_nd_regions)) >> + return NOTIFY_DONE; >> + >> + phys_addr = evt->u.ue_error.physical_address + >> + (evt->u.ue_error.effective_address & ~PAGE_MASK); > > Wait what? Why is physical_address page aligned, but effective_address > not? Not a problem with this patch, but still, what the hell? Not sure why, but it's the way now. I can see if I can update it if it makes sense in a later patch. > >> + if (!evt->u.ue_error.physical_address_provided || >> + !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT))) >> + return NOTIFY_DONE; >> + >> + mutex_lock(&papr_ndr_lock); >> + list_for_each_entry(p, &papr_nd_regions, list) { >> + struct resource res = p->res; >> + u64 aligned_addr; >> + > >> + if (res.start > phys_addr) >> + continue; >> + >> + if (res.end < phys_addr) >> + continue; > > surely there's a helper for this > >> + >> + aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES); >> + pr_debug("Add memory range (0x%llx -- 0x%llx) as bad range\n", >> + aligned_addr, aligned_addr + L1_CACHE_BYTES); >> + >> + if (nvdimm_bus_add_badrange(p->bus, >> + aligned_addr, L1_CACHE_BYTES)) >> + pr_warn("Failed to add bad range (0x%llx -- 0x%llx)\n", >> + aligned_addr, aligned_addr + L1_CACHE_BYTES); >> + >> + nvdimm_region_notify(p->region, >> + NVDIMM_REVALIDATE_POISON); >> + >> + break; > > nit: you can avoid stacking indentation levels by breaking out of the > loop as soon as you've found the region you're looking for. True. 
> >> + } >> + mutex_unlock(&papr_ndr_lock); >> + >> + return NOTIFY_OK; >> +} >> + >> +static struct notifier_block mce_ue_nb = { >> + .notifier_call = handle_mce_ue >> +}; >> + >> static int papr_scm_probe(struct platform_device *pdev) >> { >> struct device_node *dn = pdev->dev.of_node; >> @@ -456,6 +520,7 @@ static int papr_scm_probe(struct platform_device *pdev) >> goto err2; >> >> platform_set_drvdata(pdev, p); >> + mce_register_notifier(&mce_ue_nb); > > Either get rid of the global region list and have a notifier block in > each device's driver private data, or keep the global list and > register the notifier in module_init(). Re-registering the notifier > each time a separate device is probed seems very sketchy. Registering the notifier in the init is simpler. I will change it. > >> return 0; >> >> -- >> 2.21.0 >>
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index a5ac371a3f06..4d25c98a9835 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -12,6 +12,8 @@ #include <linux/libnvdimm.h> #include <linux/platform_device.h> #include <linux/delay.h> +#include <linux/nd.h> +#include <asm/mce.h> #include <asm/plpar_wrappers.h> @@ -39,8 +41,12 @@ struct papr_scm_priv { struct resource res; struct nd_region *region; struct nd_interleave_set nd_set; + struct list_head list; }; +LIST_HEAD(papr_nd_regions); +DEFINE_MUTEX(papr_ndr_lock); + static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; @@ -364,6 +370,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) dev_info(dev, "Region registered with target node %d and online node %d", target_nid, online_nid); + mutex_lock(&papr_ndr_lock); + list_add_tail(&p->list, &papr_nd_regions); + mutex_unlock(&papr_ndr_lock); + return 0; err: nvdimm_bus_unregister(p->bus); @@ -371,6 +381,60 @@ err: nvdimm_bus_unregister(p->bus); return -ENXIO; } +static int handle_mce_ue(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct machine_check_event *evt = data; + struct papr_scm_priv *p; + u64 phys_addr; + + if (evt->error_type != MCE_ERROR_TYPE_UE) + return NOTIFY_DONE; + + if (list_empty(&papr_nd_regions)) + return NOTIFY_DONE; + + phys_addr = evt->u.ue_error.physical_address + + (evt->u.ue_error.effective_address & ~PAGE_MASK); + + if (!evt->u.ue_error.physical_address_provided || + !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT))) + return NOTIFY_DONE; + + mutex_lock(&papr_ndr_lock); + list_for_each_entry(p, &papr_nd_regions, list) { + struct resource res = p->res; + u64 aligned_addr; + + if (res.start > phys_addr) + continue; + + if (res.end < phys_addr) + continue; + + aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES); + pr_debug("Add memory range (0x%llx -- 0x%llx) as 
bad range\n", + aligned_addr, aligned_addr + L1_CACHE_BYTES); + + if (nvdimm_bus_add_badrange(p->bus, + aligned_addr, L1_CACHE_BYTES)) + pr_warn("Failed to add bad range (0x%llx -- 0x%llx)\n", + aligned_addr, aligned_addr + L1_CACHE_BYTES); + + nvdimm_region_notify(p->region, + NVDIMM_REVALIDATE_POISON); + + break; + } + mutex_unlock(&papr_ndr_lock); + + return NOTIFY_OK; +} + +static struct notifier_block mce_ue_nb = { + .notifier_call = handle_mce_ue +}; + static int papr_scm_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; @@ -456,6 +520,7 @@ static int papr_scm_probe(struct platform_device *pdev) goto err2; platform_set_drvdata(pdev, p); + mce_register_notifier(&mce_ue_nb); return 0;
Subscribe to the MCE notification and add the physical address which generated a memory error to nvdimm bad range. Signed-off-by: Santosh Sivaraj <santosh@fossix.org> --- arch/powerpc/platforms/pseries/papr_scm.c | 65 +++++++++++++++++++++++ 1 file changed, 65 insertions(+)