
[v6,33/42] nvme: use preallocated qsg/iov in nvme_dma_prp

Message ID 20200316142928.153431-34-its@irrelevant.dk
State New
Series nvme: support NVMe v1.3d, SGLs and multiple namespaces

Commit Message

Klaus Jensen March 16, 2020, 2:29 p.m. UTC
From: Klaus Jensen <k.jensen@samsung.com>

Since cleanup of the request qsg/iov has been moved to the common
nvme_enqueue_req_completion function, there is no need to use a
stack-allocated qsg/iov in nvme_dma_prp.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Acked-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 hw/block/nvme.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)
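
For context, a minimal sketch of the centralized teardown the commit message
refers to. The helper name nvme_req_clear_mappings and the nalloc checks are
assumptions for illustration, not code taken from this series; the fragment
presumes the surrounding hw/block/nvme.c context, where NvmeRequest carries
preallocated qsg/iov fields:

    /*
     * Hypothetical helper: once cleanup happens on the common completion
     * path (nvme_enqueue_req_completion), any mapping held in the
     * preallocated req->qsg/req->iov is released exactly once per request,
     * so nvme_dma_prp no longer needs its own destroy calls.
     */
    static void nvme_req_clear_mappings(NvmeRequest *req)
    {
        if (req->qsg.nalloc) {
            qemu_sglist_destroy(&req->qsg);
        }
        if (req->iov.nalloc) {
            qemu_iovec_destroy(&req->iov);
        }
    }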

Comments

Maxim Levitsky March 25, 2020, 10:58 a.m. UTC | #1
On Mon, 2020-03-16 at 07:29 -0700, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Since cleanup of the request qsg/iov has been moved to the common
> nvme_enqueue_req_completion function, there is no need to use a
> stack-allocated qsg/iov in nvme_dma_prp.
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> Acked-by: Keith Busch <kbusch@kernel.org>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> ---
>  hw/block/nvme.c | 18 ++++++------------
>  1 file changed, 6 insertions(+), 12 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 817384e3b1a9..15ca2417af04 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -321,45 +321,39 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
>                               uint64_t prp1, uint64_t prp2, DMADirection dir,
>                               NvmeRequest *req)
>  {
> -    QEMUSGList qsg;
> -    QEMUIOVector iov;
>      uint16_t status = NVME_SUCCESS;
>  
> -    status = nvme_map_prp(n, &qsg, &iov, prp1, prp2, len, req);
> +    status = nvme_map_prp(n, &req->qsg, &req->iov, prp1, prp2, len, req);
>      if (status) {
>          return status;
>      }
>  
> -    if (qsg.nsg > 0) {
> +    if (req->qsg.nsg > 0) {
>          uint64_t residual;
>  
>          if (dir == DMA_DIRECTION_TO_DEVICE) {
> -            residual = dma_buf_write(ptr, len, &qsg);
> +            residual = dma_buf_write(ptr, len, &req->qsg);
>          } else {
> -            residual = dma_buf_read(ptr, len, &qsg);
> +            residual = dma_buf_read(ptr, len, &req->qsg);
>          }
>  
>          if (unlikely(residual)) {
>              trace_nvme_dev_err_invalid_dma();
>              status = NVME_INVALID_FIELD | NVME_DNR;
>          }
> -
> -        qemu_sglist_destroy(&qsg);
>      } else {
>          size_t bytes;
>  
>          if (dir == DMA_DIRECTION_TO_DEVICE) {
> -            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
> +            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
>          } else {
> -            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
> +            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
>          }
>  
>          if (unlikely(bytes != len)) {
>              trace_nvme_dev_err_invalid_dma();
>              status = NVME_INVALID_FIELD | NVME_DNR;
>          }
> -
> -        qemu_iovec_destroy(&iov);
>      }
>  
>      return status;

Only minor changes from the previous version, so

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky

Patch

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 817384e3b1a9..15ca2417af04 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -321,45 +321,39 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                              uint64_t prp1, uint64_t prp2, DMADirection dir,
                              NvmeRequest *req)
 {
-    QEMUSGList qsg;
-    QEMUIOVector iov;
     uint16_t status = NVME_SUCCESS;
 
-    status = nvme_map_prp(n, &qsg, &iov, prp1, prp2, len, req);
+    status = nvme_map_prp(n, &req->qsg, &req->iov, prp1, prp2, len, req);
     if (status) {
         return status;
     }
 
-    if (qsg.nsg > 0) {
+    if (req->qsg.nsg > 0) {
         uint64_t residual;
 
         if (dir == DMA_DIRECTION_TO_DEVICE) {
-            residual = dma_buf_write(ptr, len, &qsg);
+            residual = dma_buf_write(ptr, len, &req->qsg);
         } else {
-            residual = dma_buf_read(ptr, len, &qsg);
+            residual = dma_buf_read(ptr, len, &req->qsg);
         }
 
         if (unlikely(residual)) {
             trace_nvme_dev_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
-
-        qemu_sglist_destroy(&qsg);
     } else {
         size_t bytes;
 
         if (dir == DMA_DIRECTION_TO_DEVICE) {
-            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
+            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
         } else {
-            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
+            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
         }
 
         if (unlikely(bytes != len)) {
             trace_nvme_dev_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
-
-        qemu_iovec_destroy(&iov);
     }
 
     return status;
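
As a usage sketch (not part of this patch), a PRP-based admin command handler
under the nvme_dma_prp signature shown above simply passes the request along
and leaves the teardown of req->qsg/req->iov to the completion path. The
handler below mirrors the shape of the Identify Controller handler in the
upstream device, but its exact body here is an assumption for illustration:

    static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c,
                                       NvmeRequest *req)
    {
        uint64_t prp1 = le64_to_cpu(c->prp1);
        uint64_t prp2 = le64_to_cpu(c->prp2);

        /* The mapping ends up in req->qsg/req->iov; nothing to destroy
         * here, the common completion path releases it. */
        return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                            prp1, prp2, DMA_DIRECTION_FROM_DEVICE, req);
    }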