
[16/16] hw/block/nvme: use preallocated qsg/iov in nvme_dma_prp

Message ID: 20200720113748.322965-17-its@irrelevant.dk
State: New, archived
Series: hw/block/nvme: dma handling and address mapping cleanup

Commit Message

Klaus Jensen July 20, 2020, 11:37 a.m. UTC
From: Klaus Jensen <k.jensen@samsung.com>

Since cleanup of the request qsg/iov is now always done post-use, there
is no need to use a stack-allocated qsg/iov in nvme_dma_prp.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Acked-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 hw/block/nvme.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)
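
For context, the pattern the commit message relies on is a qsg/iov pair
embedded in the NvmeRequest itself and torn down once per request on
completion, rather than on every nvme_dma_prp() call. A minimal sketch of
that arrangement, based on the earlier patches in this series (field names
follow the series; treat the exact bodies as illustrative):

    typedef struct NvmeRequest {
        /* ... */
        QEMUSGList   qsg;   /* scatter/gather list for direct guest DMA */
        QEMUIOVector iov;   /* iovec for PRPs that resolve to the CMB */
    } NvmeRequest;

    /* run once when the request completes, which is what makes the
     * per-call qemu_sglist_destroy()/qemu_iovec_destroy() in
     * nvme_dma_prp() redundant */
    static void nvme_req_exit(NvmeRequest *req)
    {
        if (req->qsg.sg) {
            qemu_sglist_destroy(&req->qsg);
        }

        if (req->iov.iov) {
            qemu_iovec_destroy(&req->iov);
        }
    }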

Comments

Minwoo Im July 29, 2020, 4:15 p.m. UTC | #1
On 20-07-20 13:37:48, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
> 
> Since cleanup of the request qsg/iov is now always done post-use, there
> is no need to use a stack-allocated qsg/iov in nvme_dma_prp.
> 
> Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
> Acked-by: Keith Busch <kbusch@kernel.org>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

> ---
>  hw/block/nvme.c | 18 ++++++------------
>  1 file changed, 6 insertions(+), 12 deletions(-)
> 
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 0b3dceccc89b..b6da5a9f3fc6 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -381,45 +381,39 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
>                               uint64_t prp1, uint64_t prp2, DMADirection dir,
>                               NvmeRequest *req)
>  {
> -    QEMUSGList qsg;
> -    QEMUIOVector iov;
>      uint16_t status = NVME_SUCCESS;
>  
> -    status = nvme_map_prp(n, &qsg, &iov, prp1, prp2, len, req);
> +    status = nvme_map_prp(n, &req->qsg, &req->iov, prp1, prp2, len, req);

After this change, can we make nvme_map_prp() receive just
NvmeRequest *req, without &req->qsg and &req->iov, and retrieve them
from inside nvme_map_prp()?
Klaus Jensen July 29, 2020, 7:57 p.m. UTC | #2
On Jul 30 01:15, Minwoo Im wrote:
> On 20-07-20 13:37:48, Klaus Jensen wrote:
> > [...]
> 
> After this change, can we make nvme_map_prp() receive just
> NvmeRequest *req, without &req->qsg and &req->iov, and retrieve them
> from inside nvme_map_prp()?

Absolutely. I added a follow-up patch to do this :)
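
That follow-up would presumably reduce the helper to something like the
sketch below (hypothetical; the committed version may differ in details):

    static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                                 uint32_t len, NvmeRequest *req)
    {
        /* qsg/iov now come from the request instead of being passed in */
        QEMUSGList *qsg = &req->qsg;
        QEMUIOVector *iov = &req->iov;

        /* ... existing mapping logic, unchanged ... */
    }

with the call in nvme_dma_prp() shrinking accordingly:

    status = nvme_map_prp(n, prp1, prp2, len, req);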

Patch

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 0b3dceccc89b..b6da5a9f3fc6 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -381,45 +381,39 @@ static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                              uint64_t prp1, uint64_t prp2, DMADirection dir,
                              NvmeRequest *req)
 {
-    QEMUSGList qsg;
-    QEMUIOVector iov;
     uint16_t status = NVME_SUCCESS;
 
-    status = nvme_map_prp(n, &qsg, &iov, prp1, prp2, len, req);
+    status = nvme_map_prp(n, &req->qsg, &req->iov, prp1, prp2, len, req);
     if (status) {
         return status;
     }
 
-    if (qsg.nsg > 0) {
+    if (req->qsg.nsg > 0) {
         uint64_t residual;
 
         if (dir == DMA_DIRECTION_TO_DEVICE) {
-            residual = dma_buf_write(ptr, len, &qsg);
+            residual = dma_buf_write(ptr, len, &req->qsg);
         } else {
-            residual = dma_buf_read(ptr, len, &qsg);
+            residual = dma_buf_read(ptr, len, &req->qsg);
         }
 
         if (unlikely(residual)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
-
-        qemu_sglist_destroy(&qsg);
     } else {
         size_t bytes;
 
         if (dir == DMA_DIRECTION_TO_DEVICE) {
-            bytes = qemu_iovec_to_buf(&iov, 0, ptr, len);
+            bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
         } else {
-            bytes = qemu_iovec_from_buf(&iov, 0, ptr, len);
+            bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
         }
 
         if (unlikely(bytes != len)) {
             trace_pci_nvme_err_invalid_dma();
             status = NVME_INVALID_FIELD | NVME_DNR;
         }
-
-        qemu_iovec_destroy(&iov);
     }
 
     return status;
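
For context on how this helper is consumed, a typical caller such as the
Identify Controller handler ends up looking roughly like this (sketched
from the code at this point in the series; the exact signature may
differ):

    static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c,
                                       NvmeRequest *req)
    {
        uint64_t prp1 = le64_to_cpu(c->prp1);
        uint64_t prp2 = le64_to_cpu(c->prp2);

        /* copy the identify controller structure to the guest; the
         * request-embedded qsg/iov is mapped here and torn down later,
         * on request completion */
        return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
                            prp1, prp2, DMA_DIRECTION_FROM_DEVICE, req);
    }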