[v3,14/16] block/nvme: Extract nvme_poll_queue()

Message ID 20200704213051.19749-15-philmd@redhat.com (mailing list archive)
State New, archived
Series block/nvme: Various cleanups required to use multiple queues

Commit Message

Philippe Mathieu-Daudé July 4, 2020, 9:30 p.m. UTC
As we want to do per-queue polling, extract the nvme_poll_queue()
method which operates on a single queue.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
Stefan better double check here!
---
 block/nvme.c | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)
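
For context, a rough sketch of where this is heading: once each NVMeQueuePair carries its own event notifier, the extracted helper can back a per-queue AioContext poll callback. The callback and handler names and the q->irq_notifier field below are assumptions made for illustration only; aio_set_event_notifier() and container_of() are existing QEMU APIs (5-argument signature as of this series).

    /* Sketch only, not part of this patch. */
    static bool nvme_poll_queue_cb(void *opaque)
    {
        EventNotifier *e = opaque;
        /* Assumes the notifier has moved into the queue pair. */
        NVMeQueuePair *q = container_of(e, NVMeQueuePair, irq_notifier);

        return nvme_poll_queue(q);
    }

    /* Hypothetical registration when the queue pair is set up: */
    aio_set_event_notifier(bdrv_get_aio_context(bs), &q->irq_notifier, false,
                           nvme_handle_queue_event, nvme_poll_queue_cb);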

Comments

Stefan Hajnoczi July 6, 2020, 11:40 a.m. UTC | #1
On Sat, Jul 04, 2020 at 11:30:49PM +0200, Philippe Mathieu-Daudé wrote:
> As we want to do per-queue polling, extract the nvme_poll_queue()
> method which operates on a single queue.
> 
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
> Stefan better double check here!
> ---
>  block/nvme.c | 44 +++++++++++++++++++++++++++-----------------
>  1 file changed, 27 insertions(+), 17 deletions(-)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Patch

diff --git a/block/nvme.c b/block/nvme.c
index 51ac36dc4f..a6ff660ad2 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -590,31 +590,41 @@  out:
     qemu_vfree(id);
 }
 
+static bool nvme_poll_queue(NVMeQueuePair *q)
+{
+    bool progress = false;
+
+    const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
+    NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
+
+    /*
+     * Do an early check for completions. q->lock isn't needed because
+     * nvme_process_completion() only runs in the event loop thread and
+     * cannot race with itself.
+     */
+    if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
+        return false;
+    }
+
+    qemu_mutex_lock(&q->lock);
+    while (nvme_process_completion(q)) {
+        /* Keep polling */
+        progress = true;
+    }
+    qemu_mutex_unlock(&q->lock);
+
+    return progress;
+}
+
 static bool nvme_poll_queues(BDRVNVMeState *s)
 {
     bool progress = false;
     int i;
 
     for (i = 0; i < s->nr_queues; i++) {
-        NVMeQueuePair *q = s->queues[i];
-        const size_t cqe_offset = q->cq.head * NVME_CQ_ENTRY_BYTES;
-        NvmeCqe *cqe = (NvmeCqe *)&q->cq.queue[cqe_offset];
-
-        /*
-         * Do an early check for completions. q->lock isn't needed because
-         * nvme_process_completion() only runs in the event loop thread and
-         * cannot race with itself.
-         */
-        if ((le16_to_cpu(cqe->status) & 0x1) == q->cq_phase) {
-            continue;
-        }
-
-        qemu_mutex_lock(&q->lock);
-        while (nvme_process_completion(q)) {
-            /* Keep polling */
+        if (nvme_poll_queue(s->queues[i])) {
             progress = true;
         }
-        qemu_mutex_unlock(&q->lock);
     }
     return progress;
 }
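
The early return in the extracted helper leans on the NVMe completion queue phase tag: the controller toggles bit 0 of each entry's status field every time it wraps around the queue, so if the entry at the current head still carries the phase value recorded in q->cq_phase, nothing new has been posted and the queue can be skipped without taking the lock. Below is a minimal, self-contained sketch of that mechanism in the NVMe spec's convention (the host tracks the phase it expects on new entries); all names are made up for illustration and are not QEMU code.

    #include <stdbool.h>
    #include <stdint.h>

    #define CQ_SIZE 128

    typedef struct {
        uint16_t sq_head;
        uint16_t sq_id;
        uint16_t cid;
        uint16_t status;     /* bit 0 is the phase tag */
    } cq_entry;

    typedef struct {
        cq_entry entries[CQ_SIZE];  /* zeroed at init */
        uint32_t head;              /* next entry the host will look at */
        int      phase;             /* phase expected on new entries, starts at 1 */
    } completion_queue;

    /* True if the entry at the head is a fresh completion. */
    static bool cq_has_new_entry(const completion_queue *cq)
    {
        /* The controller writes each entry with the current phase tag and
         * flips the tag every time it wraps, so an entry whose phase bit
         * matches cq->phase was written after the host's last pass.
         * (Real code also needs a read barrier and little-endian
         * conversion before trusting the rest of the entry.) */
        return (cq->entries[cq->head].status & 0x1) == cq->phase;
    }

    /* Consume one completion and advance the head, toggling the expected
     * phase when the head wraps back to slot 0. */
    static void cq_pop(completion_queue *cq)
    {
        cq->head = (cq->head + 1) % CQ_SIZE;
        if (cq->head == 0) {
            cq->phase = !cq->phase;
        }
    }

Note that block/nvme.c stores the complementary value: q->cq_phase holds the stale phase, which is why a *match* there means "no new completion" and the helper returns false.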