diff mbox series

[v2,18/19] block/nvme: Switch to using the MSIX API

Message ID 20201026105504.4023620-19-philmd@redhat.com
State New, archived
Headers show
Series util/vfio-helpers: Allow using multiple MSIX IRQs | expand

Commit Message

Philippe Mathieu-Daudé Oct. 26, 2020, 10:55 a.m. UTC
In preparation for using multiple IRQs, switch to using the recently
introduced MSIX API. Instead of allocating and assigning an IRQ in
a single step, we now have to use two distinct calls.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 block/nvme.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

Comments

Auger Eric Oct. 26, 2020, 8:32 p.m. UTC | #1
Hi Philippe,

On 10/26/20 11:55 AM, Philippe Mathieu-Daudé wrote:
> In preparation for using multiple IRQs, switch to using the recently
> introduced MSIX API. Instead of allocating and assigning an IRQ in
> a single step, we now have to use two distinct calls.
> 
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
>  block/nvme.c | 14 ++++++++++++--
>  1 file changed, 12 insertions(+), 2 deletions(-)
> 
> diff --git a/block/nvme.c b/block/nvme.c
> index 46b09b3a3a7..191678540b6 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c
> @@ -693,6 +693,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>      size_t device_page_size_min;
>      size_t device_page_size_max;
>      size_t iommu_page_size_min = 4096;
> +    unsigned irq_count = MSIX_IRQ_COUNT;
>  
>      qemu_co_mutex_init(&s->dma_map_lock);
>      qemu_co_queue_init(&s->dma_flush_queue);
> @@ -809,8 +810,17 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>          }
>      }
>  
> -    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
> -                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
> +    ret = qemu_vfio_pci_msix_init_irqs(s->vfio, &irq_count, errp);
> +    if (ret) {
> +        if (ret == -EOVERFLOW) {
> +            error_append_hint(errp, "%u IRQs requested but only %u available\n",
> +                              MSIX_IRQ_COUNT, irq_count);
This message can be directly printed in qemu_vfio_pci_msix_init_irqs()
> +        }
> +        goto out;
> +    }
> +
> +    ret = qemu_vfio_pci_msix_set_irq(s->vfio, MSIX_SHARED_IRQ_IDX,
> +                                     s->irq_notifier, errp);
>      if (ret) {
>          goto out;
>      }
> 
Thanks

Eric
Philippe Mathieu-Daudé Oct. 27, 2020, 9:55 a.m. UTC | #2
On 10/26/20 9:32 PM, Auger Eric wrote:
> Hi Philippe,
> 
> On 10/26/20 11:55 AM, Philippe Mathieu-Daudé wrote:
>> In preparation for using multiple IRQs, switch to using the recently
>> introduced MSIX API. Instead of allocating and assigning an IRQ in
>> a single step, we now have to use two distinct calls.
>>
>> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
>> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
>> ---
>>  block/nvme.c | 14 ++++++++++++--
>>  1 file changed, 12 insertions(+), 2 deletions(-)
>>
>> diff --git a/block/nvme.c b/block/nvme.c
>> index 46b09b3a3a7..191678540b6 100644
>> --- a/block/nvme.c
>> +++ b/block/nvme.c
>> @@ -693,6 +693,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>>      size_t device_page_size_min;
>>      size_t device_page_size_max;
>>      size_t iommu_page_size_min = 4096;
>> +    unsigned irq_count = MSIX_IRQ_COUNT;
>>  
>>      qemu_co_mutex_init(&s->dma_map_lock);
>>      qemu_co_queue_init(&s->dma_flush_queue);
>> @@ -809,8 +810,17 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>>          }
>>      }
>>  
>> -    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
>> -                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
>> +    ret = qemu_vfio_pci_msix_init_irqs(s->vfio, &irq_count, errp);
>> +    if (ret) {
>> +        if (ret == -EOVERFLOW) {
>> +            error_append_hint(errp, "%u IRQs requested but only %u available\n",
>> +                              MSIX_IRQ_COUNT, irq_count);
> This message can be directly printed in qemu_vfio_pci_msix_init_irqs()

Good idea, thanks.
diff mbox series

Patch

diff --git a/block/nvme.c b/block/nvme.c
index 46b09b3a3a7..191678540b6 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -693,6 +693,7 @@  static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
     size_t device_page_size_min;
     size_t device_page_size_max;
     size_t iommu_page_size_min = 4096;
+    unsigned irq_count = MSIX_IRQ_COUNT;
 
     qemu_co_mutex_init(&s->dma_map_lock);
     qemu_co_queue_init(&s->dma_flush_queue);
@@ -809,8 +810,17 @@  static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
         }
     }
 
-    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
-                                 VFIO_PCI_MSIX_IRQ_INDEX, errp);
+    ret = qemu_vfio_pci_msix_init_irqs(s->vfio, &irq_count, errp);
+    if (ret) {
+        if (ret == -EOVERFLOW) {
+            error_append_hint(errp, "%u IRQs requested but only %u available\n",
+                              MSIX_IRQ_COUNT, irq_count);
+        }
+        goto out;
+    }
+
+    ret = qemu_vfio_pci_msix_set_irq(s->vfio, MSIX_SHARED_IRQ_IDX,
+                                     s->irq_notifier, errp);
     if (ret) {
         goto out;
     }