diff mbox series

[U-Boot] nvme: Fix PRP Offset Invalid

Message ID 20190821140905.20856-1-awilliams@marvell.com
State Changes Requested
Headers show
Series [U-Boot] nvme: Fix PRP Offset Invalid | expand

Commit Message

Aaron Williams Aug. 21, 2019, 2:09 p.m. UTC
From: Aaron Williams <aaron.williams@cavium.com>

When large writes take place, I saw a Samsung EVO 970+ return a status
value of 0x13, PRP Offset Invalid.  I tracked this down to the
improper handling of PRP entries.  The blocks the PRP entries are
placed in cannot cross a page boundary and thus should be allocated
on page boundaries.  This is how the Linux kernel driver works.

With this patch, the PRP pool is allocated on a page boundary and
other than the very first allocation, the pool size is a multiple of
the page size.  Each page can hold (4096 / 8) - 1 entries since the
last entry must point to the next page in the pool.

Change-Id: I8df66c87d6a6105da556d327d4cc5148e444d20e
Signed-off-by: Aaron Williams <awilliams@marvell.com>
---
 drivers/nvme/nvme.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

Comments

Bin Meng Aug. 22, 2019, 1:40 a.m. UTC | #1
Hi Aaron,

On Wed, Aug 21, 2019 at 10:09 PM Aaron Williams <awilliams@marvell.com> wrote:
>
> From: Aaron Williams <aaron.williams@cavium.com>
>
> When large writes take place I saw a Samsung EVO 970+ return a status
> value of 0x13, PRP Offset Invalid.  I tracked this down to the
> improper handling of PRP entries.  The blocks the PRP entries are
> placed in cannot cross a page boundary and thus should be allocated
> on page boundaries.  This is how the Linux kernel driver works.
>
> With this patch, the PRP pool is allocated on a page boundary and
> other than the very first allocation, the pool size is a multiple of
> the page size.  Each page can hold (4096 / 8) - 1 entries since the
> last entry must point to the next page in the pool.
>
> Change-Id: I8df66c87d6a6105da556d327d4cc5148e444d20e
> Signed-off-by: Aaron Williams <awilliams@marvell.com>
> ---
>  drivers/nvme/nvme.c | 21 +++++++++++++--------
>  1 file changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
> index 7008a54a6d..71ea226820 100644
> --- a/drivers/nvme/nvme.c
> +++ b/drivers/nvme/nvme.c
> @@ -74,6 +74,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
>         u64 *prp_pool;
>         int length = total_len;
>         int i, nprps;
> +       u32 prps_per_page = (page_size >> 3) - 1;
> +       u32 num_pages;
> +
>         length -= (page_size - offset);
>
>         if (length <= 0) {
> @@ -90,15 +93,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
>         }
>
>         nprps = DIV_ROUND_UP(length, page_size);
> +       num_pages = DIV_ROUND_UP(nprps, prps_per_page);
>
>         if (nprps > dev->prp_entry_num) {
>                 free(dev->prp_pool);
> -               dev->prp_pool = malloc(nprps << 3);
> +               dev->prp_pool = memalign(page_size, num_pages * page_size);
>                 if (!dev->prp_pool) {
>                         printf("Error: malloc prp_pool fail\n");
>                         return -ENOMEM;
>                 }
> -               dev->prp_entry_num = nprps;
> +               dev->prp_entry_num = ((page_size >> 3) - 1) * num_pages;

This should be: dev->prp_entry_num = prps_per_page * num_pages;

When you respin the patch, please add the version number in the email
title so that we can track it more easily. Thanks!

>         }

[snip]

Regards,
Bin
diff mbox series

Patch

diff --git a/drivers/nvme/nvme.c b/drivers/nvme/nvme.c
index 7008a54a6d..71ea226820 100644
--- a/drivers/nvme/nvme.c
+++ b/drivers/nvme/nvme.c
@@ -74,6 +74,9 @@  static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
 	u64 *prp_pool;
 	int length = total_len;
 	int i, nprps;
+	u32 prps_per_page = (page_size >> 3) - 1;
+	u32 num_pages;
+
 	length -= (page_size - offset);
 
 	if (length <= 0) {
@@ -90,15 +93,16 @@  static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
+	num_pages = DIV_ROUND_UP(nprps, prps_per_page);
 
 	if (nprps > dev->prp_entry_num) {
 		free(dev->prp_pool);
-		dev->prp_pool = malloc(nprps << 3);
+		dev->prp_pool = memalign(page_size, num_pages * page_size);
 		if (!dev->prp_pool) {
 			printf("Error: malloc prp_pool fail\n");
 			return -ENOMEM;
 		}
-		dev->prp_entry_num = nprps;
+		dev->prp_entry_num = ((page_size >> 3) - 1) * num_pages;
 	}
 
 	prp_pool = dev->prp_pool;
@@ -791,12 +795,6 @@  static int nvme_probe(struct udevice *udev)
 	}
 	memset(ndev->queues, 0, NVME_Q_NUM * sizeof(struct nvme_queue *));
 
-	ndev->prp_pool = malloc(MAX_PRP_POOL);
-	if (!ndev->prp_pool) {
-		ret = -ENOMEM;
-		printf("Error: %s: Out of memory!\n", udev->name);
-		goto free_nvme;
-	}
 	ndev->prp_entry_num = MAX_PRP_POOL >> 3;
 
 	ndev->cap = nvme_readq(&ndev->bar->cap);
@@ -808,6 +806,13 @@  static int nvme_probe(struct udevice *udev)
 	if (ret)
 		goto free_queue;
 
+	ndev->prp_pool = memalign(ndev->page_size, MAX_PRP_POOL);
+	if (!ndev->prp_pool) {
+		ret = -ENOMEM;
+		printf("Error: %s: Out of memory!\n", udev->name);
+		goto free_nvme;
+	}
+
 	ret = nvme_setup_io_queues(ndev);
 	if (ret)
 		goto free_queue;