
hw/nvme: fix BAR size mismatch of SR-IOV VF

Message ID: 20240604211306.2155791-1-minwoo.im@samsung.com

Commit Message

Minwoo Im June 4, 2024, 9:13 p.m. UTC
PF initializes the SR-IOV VF BAR0 region in nvme_init_sriov() with a
bar_size calculated from the Primary Controller Capabilities (VQFRSM and
VIFRSM) rather than from `max_ioqpairs` and `msix_qsize`, which apply to
the PF only.

In this case, the BAR size computed by the PF in nvme_init_sriov() and
by the VF in nvme_init_pci() may differ, especially with a large
sriov_max_vfs (e.g., 127, the current maximum number of VFs). The host
operating system is then given an invalid BAR0 address for the VFs, so
MMIO accesses are not trapped properly and NVMe driver initialization
fails.

For example, given the following options, the PF initializes the VF BAR
size as 4K, but the VF tries to allocate an 8K BAR0 in nvme_init_pci().

	#!/bin/bash

	nr_vf=$((127))
	nr_vq=$(($nr_vf * 2 + 2))
	nr_vi=$(($nr_vq / 2 + 1))
	nr_ioq=$(($nr_vq + 2))

	...

	-device nvme,serial=foo,id=nvme0,bus=rp2,subsys=subsys0,mdts=9,msix_qsize=$nr_ioq,max_ioqpairs=$nr_ioq,sriov_max_vfs=$nr_vf,sriov_vq_flexible=$nr_vq,sriov_vi_flexible=$nr_vi \
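
Plugging in the numbers, the script above gives nr_vq = 127 * 2 + 2 =
256, nr_vi = 256 / 2 + 1 = 129, and nr_ioq = 256 + 2 = 258. Assuming the
flexible resources are divided evenly across the VFs (which is how the
Primary Controller Capabilities are derived when sriov_max_vq_per_vf and
sriov_max_vi_per_vf are left at 0), each VF gets VQFRSM = 256 / 127 = 2
and VIFRSM = 129 / 127 = 1. The PF therefore sizes the VF BAR0 for 2
queue pairs and 1 interrupt vector, while the unpatched VF sizes it for
max_ioqpairs + 1 = 259 queue pairs and 258 vectors, hence the 4K/8K
mismatch.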

To fix this issue, this patch makes the PF and VF initialization paths
calculate the BAR size from different inputs, as sketched below:

	PF: `max_ioqpairs + 1` with `msix_qsize`
	VF: VQFRSM with VIFRSM
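
Both inputs feed nvme_mbar_size(), which derives the BAR size from a
queue count and an interrupt vector count. A minimal sketch of that
derivation (the helper name, the literal doorbell stride, and the
alignment details here are illustrative, not copied from ctrl.c):

	static uint64_t bar0_size(unsigned nr_queues, unsigned nr_vectors,
	                          unsigned *msix_table_offset,
	                          unsigned *msix_pba_offset)
	{
	    /* 4K register page followed by one SQ/CQ doorbell pair per
	     * queue, assuming CAP.DSTRD = 0 (4-byte doorbell stride) */
	    uint64_t bar_size = sizeof(NvmeBar) + 2 * nr_queues * 4;

	    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);

	    /* the MSI-X table (16 bytes per vector) and PBA live in BAR0 */
	    *msix_table_offset = bar_size;
	    bar_size += nr_vectors * PCI_MSIX_ENTRY_SIZE;

	    bar_size = QEMU_ALIGN_UP(bar_size, 4 * KiB);
	    *msix_pba_offset = bar_size;
	    bar_size += QEMU_ALIGN_UP(nr_vectors, 64) / 8;

	    /* PCI memory BARs are sized to a power of two */
	    return pow2ceil(bar_size);
	}

Since both counts enter the calculation, the PF and the VF must agree on
which pair of values to use for the VF BAR, which is what the patch below
enforces.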

Signed-off-by: Minwoo Im <minwoo.im@samsung.com>
---
 hw/nvme/ctrl.c | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

Comments

Klaus Jensen June 6, 2024, 6:04 a.m. UTC | #1
On Jun  5 06:13, Minwoo Im wrote:
> PF initializes the SR-IOV VF BAR0 region in nvme_init_sriov() with a
> bar_size calculated from the Primary Controller Capabilities (VQFRSM
> and VIFRSM) rather than from `max_ioqpairs` and `msix_qsize`, which
> apply to the PF only.
> 
> [...]

Thanks, looks good Minwoo!

Reviewed-by: Klaus Jensen <k.jensen@samsung.com>


Patch

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index 127c3d2383..57bc26034c 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -8093,6 +8093,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
     uint8_t *pci_conf = pci_dev->config;
     uint64_t bar_size;
     unsigned msix_table_offset = 0, msix_pba_offset = 0;
+    unsigned nr_vectors;
     int ret;
 
     pci_conf[PCI_INTERRUPT_PIN] = 1;
@@ -8125,9 +8126,19 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
         assert(n->params.msix_qsize >= 1);
 
         /* add one to max_ioqpairs to account for the admin queue pair */
-        bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1,
-                                  n->params.msix_qsize, &msix_table_offset,
-                                  &msix_pba_offset);
+        if (!pci_is_vf(pci_dev)) {
+            nr_vectors = n->params.msix_qsize;
+            bar_size = nvme_mbar_size(n->params.max_ioqpairs + 1,
+                                      nr_vectors, &msix_table_offset,
+                                      &msix_pba_offset);
+        } else {
+            NvmeCtrl *pn = NVME(pcie_sriov_get_pf(pci_dev));
+            NvmePriCtrlCap *cap = &pn->pri_ctrl_cap;
+
+            nr_vectors = le16_to_cpu(cap->vifrsm);
+            bar_size = nvme_mbar_size(le16_to_cpu(cap->vqfrsm), nr_vectors,
+                                      &msix_table_offset, &msix_pba_offset);
+        }
 
         memory_region_init(&n->bar0, OBJECT(n), "nvme-bar0", bar_size);
         memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
@@ -8141,7 +8152,7 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
                              PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
         }
 
-        ret = msix_init(pci_dev, n->params.msix_qsize,
+        ret = msix_init(pci_dev, nr_vectors,
                         &n->bar0, 0, msix_table_offset,
                         &n->bar0, 0, msix_pba_offset, 0, errp);
     }
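
With the fix applied, a quick way to sanity-check the result from the
host is to enable the VFs and look at the BAR0 size the kernel reports
for a VF; a rough sketch, with illustrative PCI addresses:

	# enable the VFs on the PF (address illustrative)
	echo 127 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs

	# with the fix, the VF's Region 0 size matches what the PF's
	# SR-IOV capability advertised, and the nvme driver binds cleanly
	lspci -s 0000:01:00.1 -vv | grep "Region 0"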