Message ID | 20220927173845.2293378-3-sgzhang@google.com |
---|---|
State | Changes Requested |
Headers | show |
Series | mtd: mtdoops: change log and erase functions | expand |
Hi Ray, sgzhang@google.com wrote on Tue, 27 Sep 2022 17:38:45 +0000: > The panic function disables the local interrupts, preemption, and all > other processors. When the invoked mtdoops needs to erase a used page, > calling schedule_work() to do it will not work. Instead, just call mtd > erase function immediately. > > Tested: > ~# echo c > /proc/sysrq-trigger > [ 171.654759] sysrq: Trigger a crash > [ 171.658325] Kernel panic - not syncing: sysrq triggered crash > ...... > [ 172.406423] mtdoops: not ready 34, 35 (erase immediately) > [ 172.432285] mtdoops: ready 34, 35 > [ 172.435633] Rebooting in 10 seconds.. > > Signed-off-by: Ray Zhang <sgzhang@google.com> > --- > drivers/mtd/mtdoops.c | 66 ++++++++++++++++++++++++++----------------- > 1 file changed, 40 insertions(+), 26 deletions(-) > > diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c > index 84b21be347f6..73c6a55eb391 100644 > --- a/drivers/mtd/mtdoops.c > +++ b/drivers/mtd/mtdoops.c > @@ -106,29 +106,8 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) > return 0; > } > > -static void mtdoops_inc_counter(struct mtdoops_context *cxt) > +static void mtdoops_erase(struct mtdoops_context *cxt) > { > - cxt->nextpage++; > - if (cxt->nextpage >= cxt->oops_pages) > - cxt->nextpage = 0; > - cxt->nextcount++; > - if (cxt->nextcount == 0xffffffff) > - cxt->nextcount = 0; > - > - if (page_is_used(cxt, cxt->nextpage)) { > - schedule_work(&cxt->work_erase); > - return; > - } > - > - pr_debug("mtdoops: ready %d, %d (no erase)\n", > - cxt->nextpage, cxt->nextcount); > -} Could you please split this commit, I would like to see: * moving the code around, creating the schedule indirection (no functional change) * fixing the problem with a minimal diff > - > -/* Scheduled work - when we can't proceed without erasing a block */ > -static void mtdoops_workfunc_erase(struct work_struct *work) > -{ > - struct mtdoops_context *cxt = > - container_of(work, struct mtdoops_context, work_erase); > 
struct mtd_info *mtd = cxt->mtd; > int i = 0, j, ret, mod; > > @@ -166,8 +145,8 @@ static void mtdoops_workfunc_erase(struct work_struct *work) > ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); > > if (ret >= 0) { > - pr_debug("mtdoops: ready %d, %d\n", > - cxt->nextpage, cxt->nextcount); > + pr_notice("mtdoops: ready %d, %d\n", > + cxt->nextpage, cxt->nextcount); Seems unrelated :) > return; > } > > @@ -181,6 +160,41 @@ static void mtdoops_workfunc_erase(struct work_struct *work) > goto badblock; > } > > +/* Scheduled work - when we can't proceed without erasing a block */ > +static void mtdoops_workfunc_erase(struct work_struct *work) > +{ > + struct mtdoops_context *cxt = > + container_of(work, struct mtdoops_context, work_erase); > + > + mtdoops_erase(cxt); > +} > + > +static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic) > +{ > + cxt->nextpage++; > + if (cxt->nextpage >= cxt->oops_pages) > + cxt->nextpage = 0; > + cxt->nextcount++; > + if (cxt->nextcount == 0xffffffff) > + cxt->nextcount = 0; > + > + if (page_is_used(cxt, cxt->nextpage)) { > + pr_notice("mtdoops: not ready %d, %d (erase %s)\n", > + cxt->nextpage, cxt->nextcount, > + panic ? 
"immediately" : "scheduled"); > + if (panic) { > + /* In case of panic, erase immediately */ > + mtdoops_erase(cxt); > + } else { > + /* Otherwise, schedule work to erase it "nicely" */ > + schedule_work(&cxt->work_erase); > + } > + } else { > + pr_notice("mtdoops: ready %d, %d (no erase)\n", > + cxt->nextpage, cxt->nextcount); > + } > +} > + > static void mtdoops_write(struct mtdoops_context *cxt, int panic) > { > struct mtd_info *mtd = cxt->mtd; > @@ -214,7 +228,7 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) > mark_page_used(cxt, cxt->nextpage); > memset(cxt->oops_buf, 0xff, record_size); > > - mtdoops_inc_counter(cxt); > + mtdoops_inc_counter(cxt, panic); > out: > clear_bit(0, &cxt->oops_buf_busy); > } > @@ -279,7 +293,7 @@ static void find_next_position(struct mtdoops_context *cxt) > cxt->nextcount = maxcount; > } > > - mtdoops_inc_counter(cxt); > + mtdoops_inc_counter(cxt, 0); > } > > static void mtdoops_do_dump(struct kmsg_dumper *dumper, Otherwise lgtm. Thanks, Miquèl
Hi Miquèl, Thanks for your review comments on [PATCH v1]; they will be addressed in the next patch version [PATCH v2]. Regards, Ray
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 84b21be347f6..73c6a55eb391 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -106,29 +106,8 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) return 0; } -static void mtdoops_inc_counter(struct mtdoops_context *cxt) +static void mtdoops_erase(struct mtdoops_context *cxt) { - cxt->nextpage++; - if (cxt->nextpage >= cxt->oops_pages) - cxt->nextpage = 0; - cxt->nextcount++; - if (cxt->nextcount == 0xffffffff) - cxt->nextcount = 0; - - if (page_is_used(cxt, cxt->nextpage)) { - schedule_work(&cxt->work_erase); - return; - } - - pr_debug("mtdoops: ready %d, %d (no erase)\n", - cxt->nextpage, cxt->nextcount); -} - -/* Scheduled work - when we can't proceed without erasing a block */ -static void mtdoops_workfunc_erase(struct work_struct *work) -{ - struct mtdoops_context *cxt = - container_of(work, struct mtdoops_context, work_erase); struct mtd_info *mtd = cxt->mtd; int i = 0, j, ret, mod; @@ -166,8 +145,8 @@ static void mtdoops_workfunc_erase(struct work_struct *work) ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size); if (ret >= 0) { - pr_debug("mtdoops: ready %d, %d\n", - cxt->nextpage, cxt->nextcount); + pr_notice("mtdoops: ready %d, %d\n", + cxt->nextpage, cxt->nextcount); return; } @@ -181,6 +160,41 @@ static void mtdoops_workfunc_erase(struct work_struct *work) goto badblock; } +/* Scheduled work - when we can't proceed without erasing a block */ +static void mtdoops_workfunc_erase(struct work_struct *work) +{ + struct mtdoops_context *cxt = + container_of(work, struct mtdoops_context, work_erase); + + mtdoops_erase(cxt); +} + +static void mtdoops_inc_counter(struct mtdoops_context *cxt, int panic) +{ + cxt->nextpage++; + if (cxt->nextpage >= cxt->oops_pages) + cxt->nextpage = 0; + cxt->nextcount++; + if (cxt->nextcount == 0xffffffff) + cxt->nextcount = 0; + + if (page_is_used(cxt, cxt->nextpage)) { + pr_notice("mtdoops: not ready %d, %d (erase 
%s)\n", + cxt->nextpage, cxt->nextcount, + panic ? "immediately" : "scheduled"); + if (panic) { + /* In case of panic, erase immediately */ + mtdoops_erase(cxt); + } else { + /* Otherwise, schedule work to erase it "nicely" */ + schedule_work(&cxt->work_erase); + } + } else { + pr_notice("mtdoops: ready %d, %d (no erase)\n", + cxt->nextpage, cxt->nextcount); + } +} + static void mtdoops_write(struct mtdoops_context *cxt, int panic) { struct mtd_info *mtd = cxt->mtd; @@ -214,7 +228,7 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) mark_page_used(cxt, cxt->nextpage); memset(cxt->oops_buf, 0xff, record_size); - mtdoops_inc_counter(cxt); + mtdoops_inc_counter(cxt, panic); out: clear_bit(0, &cxt->oops_buf_busy); } @@ -279,7 +293,7 @@ static void find_next_position(struct mtdoops_context *cxt) cxt->nextcount = maxcount; } - mtdoops_inc_counter(cxt); + mtdoops_inc_counter(cxt, 0); } static void mtdoops_do_dump(struct kmsg_dumper *dumper,
The panic function disables the local interrupts, preemption, and all other processors. When the invoked mtdoops needs to erase a used page, calling schedule_work() to do it will not work. Instead, just call mtd erase function immediately. Tested: ~# echo c > /proc/sysrq-trigger [ 171.654759] sysrq: Trigger a crash [ 171.658325] Kernel panic - not syncing: sysrq triggered crash ...... [ 172.406423] mtdoops: not ready 34, 35 (erase immediately) [ 172.432285] mtdoops: ready 34, 35 [ 172.435633] Rebooting in 10 seconds.. Signed-off-by: Ray Zhang <sgzhang@google.com> --- drivers/mtd/mtdoops.c | 66 ++++++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 26 deletions(-)