[05/17] block/nvme: Replace qemu_try_blockalign0 by qemu_try_blockalign/memset
diff mbox series

Message ID 20200625184838.28172-6-philmd@redhat.com
State New
Headers show
Series
  • block/nvme: Various cleanups required to use multiple queues
Related show

Commit Message

Philippe Mathieu-Daudé June 25, 2020, 6:48 p.m. UTC
In the next commit we'll get rid of qemu_try_blockalign().
To ease review, first replace qemu_try_blockalign0() by explicit
calls to qemu_try_blockalign() and memset().

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 block/nvme.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

Comments

Stefan Hajnoczi June 26, 2020, 12:20 p.m. UTC | #1
On Thu, Jun 25, 2020 at 08:48:26PM +0200, Philippe Mathieu-Daudé wrote:
> In the next commit we'll get ride of qemu_try_blockalign().

s/ride/rid/

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

Patch
diff mbox series

diff --git a/block/nvme.c b/block/nvme.c
index ffda804a8e..bdddcd975d 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -158,12 +158,12 @@  static void nvme_init_queue(BlockDriverState *bs, NVMeQueue *q,
 
     bytes = ROUND_UP(nentries * entry_bytes, s->page_size);
     q->head = q->tail = 0;
-    q->queue = qemu_try_blockalign0(bs, bytes);
-
+    q->queue = qemu_try_blockalign(bs, bytes);
     if (!q->queue) {
         error_setg(errp, "Cannot allocate queue");
         return;
     }
+    memset(q->queue, 0, bytes);
     r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
     if (r) {
         error_setg(errp, "Cannot map queue");
@@ -204,11 +204,12 @@  static NVMeQueuePair *nvme_create_queue_pair(BlockDriverState *bs,
     if (!q) {
         return NULL;
     }
-    q->prp_list_pages = qemu_try_blockalign0(bs,
+    q->prp_list_pages = qemu_try_blockalign(bs,
                                           s->page_size * NVME_QUEUE_SIZE);
     if (!q->prp_list_pages) {
         goto fail;
     }
+    memset(q->prp_list_pages, 0, s->page_size * NVME_QUEUE_SIZE);
     qemu_mutex_init(&q->lock);
     q->index = idx;
     qemu_co_queue_init(&q->free_req_queue);
@@ -450,7 +451,7 @@  static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
     };
 
     idsz_max = MAX_CONST(sizeof(NvmeIdCtrl), sizeof(NvmeIdNs));
-    resp = qemu_try_blockalign0(bs, idsz_max);
+    resp = qemu_try_blockalign(bs, idsz_max);
     if (!resp) {
         error_setg(errp, "Cannot allocate buffer for identify response");
         goto out;
@@ -462,6 +463,8 @@  static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
         error_setg(errp, "Cannot map buffer for DMA");
         goto out;
     }
+
+    memset(resp, 0, sizeof(NvmeIdCtrl));
     cmd.prp1 = cpu_to_le64(iova);
 
     if (nvme_cmd_sync(bs, s->queues[QUEUE_INDEX_ADMIN], &cmd)) {
@@ -484,7 +487,7 @@  static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
     s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROS);
     s->supports_discard = !!(oncs & NVME_ONCS_DSM);
 
-    memset(resp, 0, 4096);
+    memset(resp, 0, sizeof(NvmeIdNs));
 
     cmd.cdw10 = 0;
     cmd.nsid = cpu_to_le32(namespace);
@@ -1202,11 +1205,11 @@  static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
 
     assert(s->nr_queues > 1);
 
-    buf = qemu_try_blockalign0(bs, s->page_size);
+    buf = qemu_try_blockalign(bs, s->page_size);
     if (!buf) {
         return -ENOMEM;
     }
-
+    memset(buf, 0, s->page_size);
     buf->nlb = cpu_to_le32(bytes >> s->blkshift);
     buf->slba = cpu_to_le64(offset >> s->blkshift);
     buf->cattr = 0;