[RFC,v3,1/5] block/nvme: Use an array of EventNotifier

Message ID 20200818164509.736367-2-philmd@redhat.com
State New, archived
Series util/vfio-helpers: Add support for multiple IRQs

Commit Message

Philippe Mathieu-Daudé Aug. 18, 2020, 4:45 p.m. UTC
In preparation for using multiple IRQs (thus multiple eventfds),
make BDRVNVMeState::irq_notifier an array (for now with a single
element, the admin queue notifier).

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 block/nvme.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)
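
In essence (excerpted from the diff below), the state struct now embeds
a one-element notifier array instead of a single notifier:

  /* This driver shares a single MSIX IRQ for the admin and I/O queues */
  #define MSIX_IRQ_COUNT  1

  struct BDRVNVMeState {
      ...
      EventNotifier irq_notifier[MSIX_IRQ_COUNT];
      ...
  };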

Comments

Stefan Hajnoczi Aug. 19, 2020, 8:08 a.m. UTC | #1
On Tue, Aug 18, 2020 at 06:45:05PM +0200, Philippe Mathieu-Daudé wrote:
> In preparation for using multiple IRQs (thus multiple eventfds),
> make BDRVNVMeState::irq_notifier an array (for now with a single
> element, the admin queue notifier).
> 
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
>  block/nvme.c | 28 ++++++++++++++++++----------
>  1 file changed, 18 insertions(+), 10 deletions(-)

This looks like an intermediate step before using multiple IRQs. I think
it makes the code confusing: on one hand, INDEX_ADMIN gives the
impression that INDEX_IO() should be used for I/O queues, while on the
other hand only a single EventNotifier is allocated and we actually
can't use INDEX_IO() yet.

If this intermediate patch is really necessary, please don't use
INDEX_ADMIN. Define a new constant instead:

  /* This driver shares a single MSIX IRQ for the admin and I/O queues */
  #define MSIX_SHARED_IRQ_IDX 0

In the future the array index can be changed to INDEX_ADMIN and
INDEX_IO(n) when there are multiple EventNotifiers.

I think that would make the code clearer.
Philippe Mathieu-Daudé Aug. 19, 2020, 3:55 p.m. UTC | #2
On 8/19/20 10:08 AM, Stefan Hajnoczi wrote:
> On Tue, Aug 18, 2020 at 06:45:05PM +0200, Philippe Mathieu-Daudé wrote:
>> In preparation for using multiple IRQs (thus multiple eventfds),
>> make BDRVNVMeState::irq_notifier an array (for now with a single
>> element, the admin queue notifier).
>>
>> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
>> ---
>>  block/nvme.c | 28 ++++++++++++++++++----------
>>  1 file changed, 18 insertions(+), 10 deletions(-)
> 
> This looks like an intermediate step before using multiple IRQs. I think
> it makes the code confusing: on one hand, INDEX_ADMIN gives the
> impression that INDEX_IO() should be used for I/O queues, while on the
> other hand only a single EventNotifier is allocated and we actually
> can't use INDEX_IO() yet.
> 
> If this intermediate patch is really necessary, please don't use
> INDEX_ADMIN. Define a new constant instead:
> 
>   /* This driver shares a single MSIX IRQ for the admin and I/O queues */
>   #define MSIX_SHARED_IRQ_IDX 0
> 
> In the future the array index can be changed to INDEX_ADMIN and
> INDEX_IO(n) when there are multiple EventNotifiers.
> 
> I think that would make the code clearer.

Very good idea, thanks!
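
Applied to the diff below, that suggestion would look roughly like this
(a sketch using the constant name proposed in the review; the actual
follow-up revision may differ):

  /* This driver shares a single MSIX IRQ for the admin and I/O queues */
  #define MSIX_SHARED_IRQ_IDX 0

  -    ret = event_notifier_init(&s->irq_notifier[INDEX_ADMIN], 0);
  +    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);

  -    aio_set_event_notifier(bdrv_get_aio_context(bs),
  -                           &s->irq_notifier[INDEX_ADMIN],
  +    aio_set_event_notifier(bdrv_get_aio_context(bs),
  +                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
                              false, nvme_handle_event, nvme_poll_cb);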

Patch

diff --git a/block/nvme.c b/block/nvme.c
index a61e86a83eb..cdd16d451e7 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -106,6 +106,9 @@  QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
 #define INDEX_ADMIN     0
 #define INDEX_IO(n)     (1 + n)
 
+/* This driver shares a single MSIX IRQ for the admin and I/O queues */
+#define MSIX_IRQ_COUNT  1
+
 struct BDRVNVMeState {
     AioContext *aio_context;
     QEMUVFIOState *vfio;
@@ -120,7 +123,7 @@  struct BDRVNVMeState {
     /* How many uint32_t elements does each doorbell entry take. */
     size_t doorbell_scale;
     bool write_cache_supported;
-    EventNotifier irq_notifier;
+    EventNotifier irq_notifier[MSIX_IRQ_COUNT];
 
     uint64_t nsze; /* Namespace size reported by identify command */
     int nsid;      /* The namespace id to read/write data. */
@@ -631,7 +634,8 @@  static bool nvme_poll_queues(BDRVNVMeState *s)
 
 static void nvme_handle_event(EventNotifier *n)
 {
-    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
+    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
+                                    irq_notifier[INDEX_ADMIN]);
 
     trace_nvme_handle_event(s);
     event_notifier_test_and_clear(n);
@@ -683,7 +687,8 @@  out_error:
 static bool nvme_poll_cb(void *opaque)
 {
     EventNotifier *e = opaque;
-    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
+    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
+                                    irq_notifier[INDEX_ADMIN]);
 
     trace_nvme_poll_cb(s);
     return nvme_poll_queues(s);
@@ -705,7 +710,7 @@  static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     s->device = g_strdup(device);
     s->nsid = namespace;
     s->aio_context = bdrv_get_aio_context(bs);
-    ret = event_notifier_init(&s->irq_notifier, 0);
+    ret = event_notifier_init(&s->irq_notifier[INDEX_ADMIN], 0);
     if (ret) {
         error_setg(errp, "Failed to init event notifier");
         return ret;
@@ -784,12 +789,13 @@  static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         }
     }
 
-    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
+    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
                                  VFIO_PCI_MSIX_IRQ_INDEX, errp);
     if (ret) {
         goto out;
     }
-    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+    aio_set_event_notifier(bdrv_get_aio_context(bs),
+                           &s->irq_notifier[INDEX_ADMIN],
                            false, nvme_handle_event, nvme_poll_cb);
 
     nvme_identify(bs, namespace, &local_err);
@@ -872,9 +878,10 @@  static void nvme_close(BlockDriverState *bs)
         nvme_free_queue_pair(s->queues[i]);
     }
     g_free(s->queues);
-    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+    aio_set_event_notifier(bdrv_get_aio_context(bs),
+                           &s->irq_notifier[INDEX_ADMIN],
                            false, NULL, NULL);
-    event_notifier_cleanup(&s->irq_notifier);
+    event_notifier_cleanup(&s->irq_notifier[INDEX_ADMIN]);
     qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
     qemu_vfio_close(s->vfio);
 
@@ -1381,7 +1388,8 @@  static void nvme_detach_aio_context(BlockDriverState *bs)
         q->completion_bh = NULL;
     }
 
-    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
+    aio_set_event_notifier(bdrv_get_aio_context(bs),
+                           &s->irq_notifier[INDEX_ADMIN],
                            false, NULL, NULL);
 }
 
@@ -1391,7 +1399,7 @@  static void nvme_attach_aio_context(BlockDriverState *bs,
     BDRVNVMeState *s = bs->opaque;
 
     s->aio_context = new_context;
-    aio_set_event_notifier(new_context, &s->irq_notifier,
+    aio_set_event_notifier(new_context, &s->irq_notifier[INDEX_ADMIN],
                            false, nvme_handle_event, nvme_poll_cb);
 
     for (int i = 0; i < s->nr_queues; i++) {
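
A note on the container_of() change above: passing irq_notifier[INDEX_ADMIN]
as the member argument works because container_of() subtracts the member's
offset within BDRVNVMeState, and INDEX_ADMIN is 0, i.e. the start of the
array. A minimal standalone sketch of the pattern (hypothetical struct and
field names, not QEMU code):

  #include <stddef.h>
  #include <stdio.h>

  /* Same idea as QEMU's container_of(): recover the enclosing struct
   * from a pointer to one of its members. */
  #define container_of(ptr, type, member) \
      ((type *)((char *)(ptr) - offsetof(type, member)))

  struct state {
      int id;
      int notifier[1]; /* stands in for EventNotifier irq_notifier[1] */
  };

  int main(void)
  {
      struct state st = { .id = 42 };
      struct state *back = container_of(&st.notifier[0],
                                        struct state, notifier[0]);
      printf("%d\n", back->id); /* prints 42 */
      return 0;
  }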