Message ID | a0cb0f8f9bb3d68dea779cc49c0565eeb43b0ca8.1632816074.git.stefan@agner.ch |
---|---|
State | Changes Requested |
Delegated to: | Tom Rini |
Headers | show |
Series | nvme: invalidate correct memory range after read | expand |
On Tue, 28 Sep 2021 10:01:40 +0200 Stefan Agner <stefan@agner.ch> wrote: > The current code invalidates the range after the read buffer since the > buffer pointer gets incremented in the read loop. Use a temporary > pointer to make sure we have a pristine pointer to invalidate the > correct memory range after read. Ah, good catch! This looks good, just one nit below: > Fixes: 704e040a51d2 ("nvme: Apply cache operations on the DMA buffers") > Signed-off-by: Stefan Agner <stefan@agner.ch> > --- > > drivers/nvme/nvme.c | 7 ++++--- > 1 file changed, 4 insertions(+), 3 deletions(-) > > diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c > index f6465ea7f4..354513ad30 100644 > --- a/drivers/nvme/nvme.c > +++ b/drivers/nvme/nvme.c > @@ -743,6 +743,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr, > u64 prp2; > u64 total_len = blkcnt << desc->log2blksz; > u64 temp_len = total_len; > + void *temp_buffer = buffer; You could make this an unsigned long (or better uintptr_t), then lose all the casts below and avoid the void ptr arithmetic. But it works either way, so: Reviewed-by: Andre Przywara <andre.przywara@arm.com> Cheers, Andre > > u64 slba = blknr; > u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift); > @@ -770,19 +771,19 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr, > } > > if (nvme_setup_prps(dev, &prp2, > - lbas << ns->lba_shift, (ulong)buffer)) > + lbas << ns->lba_shift, (ulong)temp_buffer)) > return -EIO; > c.rw.slba = cpu_to_le64(slba); > slba += lbas; > c.rw.length = cpu_to_le16(lbas - 1); > - c.rw.prp1 = cpu_to_le64((ulong)buffer); > + c.rw.prp1 = cpu_to_le64((ulong)temp_buffer); > c.rw.prp2 = cpu_to_le64(prp2); > status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q], > &c, NULL, IO_TIMEOUT); > if (status) > break; > temp_len -= (u32)lbas << ns->lba_shift; > - buffer += lbas << ns->lba_shift; > + temp_buffer += lbas << ns->lba_shift; > } > > if (read)
On 2021-09-30 18:09, Andre Przywara wrote: > On Tue, 28 Sep 2021 10:01:40 +0200 > Stefan Agner <stefan@agner.ch> wrote: > >> The current code invalidates the range after the read buffer since the >> buffer pointer gets incremented in the read loop. Use a temporary >> pointer to make sure we have a pristine pointer to invalidate the >> correct memory range after read. > > Ah, good catch! It did actually create issues in the real world, where sometimes it would just not recognize a file system properly when using the ls command. > This looks good, just one nit below: > >> Fixes: 704e040a51d2 ("nvme: Apply cache operations on the DMA buffers") >> Signed-off-by: Stefan Agner <stefan@agner.ch> >> --- >> >> drivers/nvme/nvme.c | 7 ++++--- >> 1 file changed, 4 insertions(+), 3 deletions(-) >> >> diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c >> index f6465ea7f4..354513ad30 100644 >> --- a/drivers/nvme/nvme.c >> +++ b/drivers/nvme/nvme.c >> @@ -743,6 +743,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr, >> u64 prp2; >> u64 total_len = blkcnt << desc->log2blksz; >> u64 temp_len = total_len; >> + void *temp_buffer = buffer; > > You could make this an unsigned long (or better uintptr_t), then lose all > the casts below and avoid the void ptr arithmetic. Ok, I'll send an update shortly. > > But it works either way, so: > > Reviewed-by: Andre Przywara <andre.przywara@arm.com> Thanks for your review. 
-- Stefan > > Cheers, > Andre > > >> >> u64 slba = blknr; >> u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift); >> @@ -770,19 +771,19 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr, >> } >> >> if (nvme_setup_prps(dev, &prp2, >> - lbas << ns->lba_shift, (ulong)buffer)) >> + lbas << ns->lba_shift, (ulong)temp_buffer)) >> return -EIO; >> c.rw.slba = cpu_to_le64(slba); >> slba += lbas; >> c.rw.length = cpu_to_le16(lbas - 1); >> - c.rw.prp1 = cpu_to_le64((ulong)buffer); >> + c.rw.prp1 = cpu_to_le64((ulong)temp_buffer); >> c.rw.prp2 = cpu_to_le64(prp2); >> status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q], >> &c, NULL, IO_TIMEOUT); >> if (status) >> break; >> temp_len -= (u32)lbas << ns->lba_shift; >> - buffer += lbas << ns->lba_shift; >> + temp_buffer += lbas << ns->lba_shift; >> } >> >> if (read)
diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c index f6465ea7f4..354513ad30 100644 --- a/drivers/nvme/nvme.c +++ b/drivers/nvme/nvme.c @@ -743,6 +743,7 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr, u64 prp2; u64 total_len = blkcnt << desc->log2blksz; u64 temp_len = total_len; + void *temp_buffer = buffer; u64 slba = blknr; u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift); @@ -770,19 +771,19 @@ static ulong nvme_blk_rw(struct udevice *udev, lbaint_t blknr, } if (nvme_setup_prps(dev, &prp2, - lbas << ns->lba_shift, (ulong)buffer)) + lbas << ns->lba_shift, (ulong)temp_buffer)) return -EIO; c.rw.slba = cpu_to_le64(slba); slba += lbas; c.rw.length = cpu_to_le16(lbas - 1); - c.rw.prp1 = cpu_to_le64((ulong)buffer); + c.rw.prp1 = cpu_to_le64((ulong)temp_buffer); c.rw.prp2 = cpu_to_le64(prp2); status = nvme_submit_sync_cmd(dev->queues[NVME_IO_Q], &c, NULL, IO_TIMEOUT); if (status) break; temp_len -= (u32)lbas << ns->lba_shift; - buffer += lbas << ns->lba_shift; + temp_buffer += lbas << ns->lba_shift; } if (read)
The current code invalidates the range after the read buffer since the buffer pointer gets incremented in the read loop. Use a temporary pointer to make sure we have a pristine pointer to invalidate the correct memory range after read. Fixes: 704e040a51d2 ("nvme: Apply cache operations on the DMA buffers") Signed-off-by: Stefan Agner <stefan@agner.ch> --- drivers/nvme/nvme.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-)