
[v2,12/15] hw/nvme: Initialize capability structures for primary/secondary controllers

Message ID 20211116153446.317143-13-lukasz.gieryk@linux.intel.com
State New, archived
Series hw/nvme: SR-IOV with Virtualization Enhancements

Commit Message

Łukasz Gieryk Nov. 16, 2021, 3:34 p.m. UTC
With four new properties:
 - sriov_v{i,q}_flexible,
 - sriov_max_v{i,q}_per_vf,
one can configure the number of available flexible resources, as well as
the limits. The primary and secondary controller capability structures
are initialized accordingly.

Since the number of available queues (interrupts) now varies between
VF/PF, BAR size calculation is also adjusted.

Signed-off-by: Łukasz Gieryk <lukasz.gieryk@linux.intel.com>
---
 hw/nvme/ctrl.c       | 138 ++++++++++++++++++++++++++++++++++++++++---
 hw/nvme/nvme.h       |   4 ++
 include/block/nvme.h |   5 ++
 3 files changed, 140 insertions(+), 7 deletions(-)
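
As a worked illustration with hypothetical values (not taken from this
series): given max_ioqpairs=26, msix_qsize=33, sriov_max_vfs=4,
sriov_vq_flexible=8 and sriov_vi_flexible=8, the PF would report

  VQPRT = 1 + 26 - 8 = 19   (private queue resources, incl. the admin queue)
  VQFRT = 8                 (flexible queue resources)
  VIPRT = 33 - 8 = 25       (private interrupt resources)
  VIFRT = 8                 (flexible interrupt resources)

so 8 flexible queue and 8 flexible interrupt resources remain assignable to
the up to 4 secondary controllers.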

Comments

Klaus Jensen Nov. 24, 2021, 8:04 a.m. UTC | #1
On Nov 16 16:34, Łukasz Gieryk wrote:
> With four new properties:
>  - sriov_v{i,q}_flexible,
>  - sriov_max_v{i,q}_per_vf,
> one can configure the number of available flexible resources, as well as
> the limits. The primary and secondary controller capability structures
> are initialized accordingly.
> 
> Since the number of available queues (interrupts) now varies between
> VF/PF, BAR size calculation is also adjusted.
> 
> Signed-off-by: Łukasz Gieryk <lukasz.gieryk@linux.intel.com>
> ---
>  hw/nvme/ctrl.c       | 138 ++++++++++++++++++++++++++++++++++++++++---
>  hw/nvme/nvme.h       |   4 ++
>  include/block/nvme.h |   5 ++
>  3 files changed, 140 insertions(+), 7 deletions(-)
> 
> diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> index f8f5dfe204..f589ffde59 100644
> --- a/hw/nvme/ctrl.c
> +++ b/hw/nvme/ctrl.c
> @@ -6358,13 +6444,40 @@ static void nvme_init_state(NvmeCtrl *n)
>      n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
>      n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
>  
> -    list->numcntl = cpu_to_le16(n->params.sriov_max_vfs);
> -    for (i = 0; i < n->params.sriov_max_vfs; i++) {
> +    list->numcntl = cpu_to_le16(max_vfs);
> +    for (i = 0; i < max_vfs; i++) {
>          sctrl = &list->sec[i];
>          sctrl->pcid = cpu_to_le16(n->cntlid);
>      }
>  
>      cap->cntlid = cpu_to_le16(n->cntlid);
> +    cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
> +
> +    if (pci_is_vf(&n->parent_obj)) {
> +        cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
> +    } else {
> +        cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
> +                                 n->params.sriov_vq_flexible);
> +        cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible);
> +        cap->vqrfap = cap->vqfrt;
> +        cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
> +        cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
> +                        cpu_to_le16(n->params.sriov_max_vq_per_vf) :
> +                        cap->vqprt;

That this defaults to VQPRT doesn't seem right. It should default to
VQFRT. Does not make sense to report a maximum number of assignable
flexible resources that is bigger than the number of flexible resources
available.
Łukasz Gieryk Nov. 24, 2021, 2:26 p.m. UTC | #2
On Wed, Nov 24, 2021 at 09:04:31AM +0100, Klaus Jensen wrote:
> On Nov 16 16:34, Łukasz Gieryk wrote:
> > With four new properties:
> >  - sriov_v{i,q}_flexible,
> >  - sriov_max_v{i,q}_per_vf,
> > one can configure the number of available flexible resources, as well as
> > the limits. The primary and secondary controller capability structures
> > are initialized accordingly.
> > 
> > Since the number of available queues (interrupts) now varies between
> > VF/PF, BAR size calculation is also adjusted.
> > 
> > Signed-off-by: Łukasz Gieryk <lukasz.gieryk@linux.intel.com>
> > ---
> >  hw/nvme/ctrl.c       | 138 ++++++++++++++++++++++++++++++++++++++++---
> >  hw/nvme/nvme.h       |   4 ++
> >  include/block/nvme.h |   5 ++
> >  3 files changed, 140 insertions(+), 7 deletions(-)
> > 
> > diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> > index f8f5dfe204..f589ffde59 100644
> > --- a/hw/nvme/ctrl.c
> > +++ b/hw/nvme/ctrl.c
> > @@ -6358,13 +6444,40 @@ static void nvme_init_state(NvmeCtrl *n)
> >      n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
> >      n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
> >  
> > -    list->numcntl = cpu_to_le16(n->params.sriov_max_vfs);
> > -    for (i = 0; i < n->params.sriov_max_vfs; i++) {
> > +    list->numcntl = cpu_to_le16(max_vfs);
> > +    for (i = 0; i < max_vfs; i++) {
> >          sctrl = &list->sec[i];
> >          sctrl->pcid = cpu_to_le16(n->cntlid);
> >      }
> >  
> >      cap->cntlid = cpu_to_le16(n->cntlid);
> > +    cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
> > +
> > +    if (pci_is_vf(&n->parent_obj)) {
> > +        cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
> > +    } else {
> > +        cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
> > +                                 n->params.sriov_vq_flexible);
> > +        cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible);
> > +        cap->vqrfap = cap->vqfrt;
> > +        cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
> > +        cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
> > +                        cpu_to_le16(n->params.sriov_max_vq_per_vf) :
> > +                        cap->vqprt;
> 
> That this defaults to VQPRT doesn't seem right. It should default to
> VQFRT. Does not make sense to report a maximum number of assignable
> flexible resources that is bigger than the number of flexible resources
> available.

I’ve explained in one of the v1 threads why I think using the current default
is better than VQFRT.

What you’ve noticed is indeed an inconvenience, but it’s – at least in
my opinion – part of the design. What matters is the current number of
unassigned flexible resources. It may be lower than VQFRSM due to
multiple reasons:
 1) resources are bound to PF, 
 2) resources are bound to other VFs,
 3) resources simply don’t exist (not baked in silicon: VQFRT < VQFRSM).

If 1) and 2) are allowed to happen, and the user must be aware of that,
then why shouldn’t 3)?
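
To make the point concrete, a small sketch (not part of the patch; it assumes
the NvmePriCtrlCap field names and widths introduced earlier in this series,
plus MIN() and le*_to_cpu() from the usual QEMU headers). Whatever VQFRSM
defaults to, the number of flexible VQ resources that can actually be handed
to a single secondary controller is additionally bounded by what is currently
unassigned:

/* Illustrative helper: the effective limit for one assignment is the
 * smaller of VQFRSM and the flexible resources not already allocated to
 * the primary (VQRFAP) or assigned to secondary controllers (VQRFA). */
static uint16_t nvme_vq_assignable(const NvmePriCtrlCap *cap)
{
    uint32_t total = le32_to_cpu(cap->vqfrt);
    uint32_t in_use = le32_to_cpu(cap->vqrfa) + le16_to_cpu(cap->vqrfap);
    uint32_t unassigned = total > in_use ? total - in_use : 0;

    return MIN(le16_to_cpu(cap->vqfrsm), unassigned);
}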
Łukasz Gieryk Nov. 25, 2021, 12:02 p.m. UTC | #3
On Wed, Nov 24, 2021 at 03:26:30PM +0100, Łukasz Gieryk wrote:
> On Wed, Nov 24, 2021 at 09:04:31AM +0100, Klaus Jensen wrote:
> > On Nov 16 16:34, Łukasz Gieryk wrote:
> > > With four new properties:
> > >  - sriov_v{i,q}_flexible,
> > >  - sriov_max_v{i,q}_per_vf,
> > > one can configure the number of available flexible resources, as well as
> > > the limits. The primary and secondary controller capability structures
> > > are initialized accordingly.
> > > 
> > > Since the number of available queues (interrupts) now varies between
> > > VF/PF, BAR size calculation is also adjusted.
> > > 
> > > Signed-off-by: Łukasz Gieryk <lukasz.gieryk@linux.intel.com>
> > > ---
> > >  hw/nvme/ctrl.c       | 138 ++++++++++++++++++++++++++++++++++++++++---
> > >  hw/nvme/nvme.h       |   4 ++
> > >  include/block/nvme.h |   5 ++
> > >  3 files changed, 140 insertions(+), 7 deletions(-)
> > > 
> > > diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> > > index f8f5dfe204..f589ffde59 100644
> > > --- a/hw/nvme/ctrl.c
> > > +++ b/hw/nvme/ctrl.c
> > > @@ -6358,13 +6444,40 @@ static void nvme_init_state(NvmeCtrl *n)
> > >      n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
> > >      n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
> > >  
> > > -    list->numcntl = cpu_to_le16(n->params.sriov_max_vfs);
> > > -    for (i = 0; i < n->params.sriov_max_vfs; i++) {
> > > +    list->numcntl = cpu_to_le16(max_vfs);
> > > +    for (i = 0; i < max_vfs; i++) {
> > >          sctrl = &list->sec[i];
> > >          sctrl->pcid = cpu_to_le16(n->cntlid);
> > >      }
> > >  
> > >      cap->cntlid = cpu_to_le16(n->cntlid);
> > > +    cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
> > > +
> > > +    if (pci_is_vf(&n->parent_obj)) {
> > > +        cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
> > > +    } else {
> > > +        cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
> > > +                                 n->params.sriov_vq_flexible);
> > > +        cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible);
> > > +        cap->vqrfap = cap->vqfrt;
> > > +        cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
> > > +        cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
> > > +                        cpu_to_le16(n->params.sriov_max_vq_per_vf) :
> > > +                        cap->vqprt;
> > 
> > That this defaults to VQPRT doesn't seem right. It should default to
> > VQFRT. Does not make sense to report a maximum number of assignable
> > flexible resources that is bigger than the number of flexible resources
> > available.
> 
> I’ve explained in one of the v1 threads why I think using the current default
> is better than VQFRT.
> 
> What you’ve noticed is indeed an inconvenience, but it’s – at least in
> my opinion – part of the design. What matters is the current number of
> unassigned flexible resources. It may be lower than VQFRSM due to
> multiple reasons:
>  1) resources are bound to PF, 
>  2) resources are bound to other VFs,
>  3) resources simply don’t exist (not baked in silicon: VQFRT < VQFRSM).
> 
> If 1) and 2) are allowed to happen, and the user must be aware of that,
> then why shouldn’t 3)?
> 

I’ve done some more thinking, and now I’m not happy with my version, nor
with the suggested VQFRT.

How about using this formula instead?

v{q,i}frsm = sriov_max_v{q,i}_per_vf ? sriov_max_v{q,i}_per_vf :
             floor(sriov_v{q,i}_flexible / sriov_max_vfs)

v{q,i}frsm would end up with values similar/proportional to those
reported by an actual SR-IOV-capable device available on the market.
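
For concreteness, the proposal could look roughly like this in
nvme_init_state() (a sketch only; integer division provides the floor, and
MAX() guards the sriov_max_vfs == 0 case where SR-IOV is disabled):

    cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
                    cpu_to_le16(n->params.sriov_max_vq_per_vf) :
                    cpu_to_le16(n->params.sriov_vq_flexible /
                                MAX(n->params.sriov_max_vfs, 1));

    cap->vifrsm = n->params.sriov_max_vi_per_vf ?
                    cpu_to_le16(n->params.sriov_max_vi_per_vf) :
                    cpu_to_le16(n->params.sriov_vi_flexible /
                                MAX(n->params.sriov_max_vfs, 1));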

Patch

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index f8f5dfe204..f589ffde59 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -36,6 +36,10 @@ 
  *              zoned.zasl=<N[optional]>, \
  *              zoned.auto_transition=<on|off[optional]>, \
  *              sriov_max_vfs=<N[optional]> \
+ *              sriov_vq_flexible=<N[optional]> \
+ *              sriov_vi_flexible=<N[optional]> \
+ *              sriov_max_vi_per_vf=<N[optional]> \
+ *              sriov_max_vq_per_vf=<N[optional]> \
  *              subsys=<subsys_id>
  *      -device nvme-ns,drive=<drive_id>,bus=<bus_name>,nsid=<nsid>,\
  *              zoned=<true|false[optional]>, \
@@ -113,6 +117,26 @@ 
  *   enables reporting of both SR-IOV and ARI capabilities by the NVMe device.
  *   Virtual function controllers will not report SR-IOV capability.
  *
+ * - `sriov_vq_flexible`
+ *   Indicates the total number of flexible queue resources assignable to all
+ *   the secondary controllers. Implicitly sets the number of PF-private
+ *   resources to (max_ioqpairs - sriov_vq_flexible).
+ *
+ * - `sriov_vi_flexible`
+ *   Indicates the total number of flexible interrupt resources assignable to
+ *   all the secondary controllers. Implicitly sets the number of PF-private
+ *   resources to (msix_qsize - sriov_vi_flexible).
+ *
+ * - `sriov_max_vi_per_vf`
+ *   Indicates the maximum number of virtual interrupt resources assignable
+ *   to a secondary controller. The default 0 resolves to the number of private
+ *   interrupt resources configured for PF.
+ *
+ * - `sriov_max_vq_per_vf`
+ *   Indicates the maximum number of virtual queue resources assignable to
+ *   a secondary controller. The default 0 resolves to the number of private
+ *   queue resources configured for PF.
+ *
  * nvme namespace device parameters
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  * - `shared`
@@ -185,6 +209,7 @@ 
 #define NVME_NUM_FW_SLOTS 1
 #define NVME_DEFAULT_MAX_ZA_SIZE (128 * KiB)
 #define NVME_MAX_VFS 127
+#define NVME_VF_RES_GRANULARITY 1
 #define NVME_VF_OFFSET 0x1
 #define NVME_VF_STRIDE 1
 
@@ -6338,6 +6363,58 @@  static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
             error_setg(errp, "PMR is not supported with SR-IOV");
             return;
         }
+
+        if (!params->sriov_vq_flexible || !params->sriov_vi_flexible) {
+            error_setg(errp, "both sriov_vq_flexible and sriov_vi_flexible"
+                       " must be set for the use of SR-IOV");
+            return;
+        }
+
+        if (params->sriov_vq_flexible < params->sriov_max_vfs * 2) {
+            error_setg(errp, "sriov_vq_flexible must be greater than or equal"
+                       " to %d (sriov_max_vfs * 2) and be a multiple of %d"
+                       " (sriov_max_vfs)", params->sriov_max_vfs * 2,
+                       params->sriov_max_vfs);
+            return;
+        }
+
+        if (params->max_ioqpairs < params->sriov_vq_flexible + 2) {
+            error_setg(errp, "sriov_vq_flexible - max_ioqpairs (PF-private"
+                       " queue resources) must be greater than or equal to 2");
+            return;
+        }
+
+        if (params->sriov_vi_flexible < params->sriov_max_vfs) {
+            error_setg(errp, "sriov_vi_flexible must be greater than or equal"
+                       " to %d (sriov_max_vfs) and be a multiple of %d"
+                       " (sriov_max_vfs)", params->sriov_max_vfs,
+                       params->sriov_max_vfs);
+            return;
+        }
+
+        if (params->msix_qsize < params->sriov_vi_flexible + 1) {
+            error_setg(errp, "sriov_vi_flexible - msix_qsize (PF-private"
+                       " interrupt resources) must be greater than or equal"
+                       " to 1");
+            return;
+        }
+
+        if (params->sriov_max_vi_per_vf &&
+            (params->sriov_max_vi_per_vf - 1) % NVME_VF_RES_GRANULARITY) {
+            error_setg(errp, "sriov_max_vi_per_vf must meet:"
+                       " (X - 1) %% %d == 0 and X >= 1",
+                       NVME_VF_RES_GRANULARITY);
+            return;
+        }
+
+        if (params->sriov_max_vq_per_vf &&
+            (params->sriov_max_vq_per_vf < 2 ||
+             (params->sriov_max_vq_per_vf - 1) % NVME_VF_RES_GRANULARITY)) {
+            error_setg(errp, "sriov_max_vq_per_vf must meet:"
+                       " (X - 1) %% %d == 0 and X >= 2",
+                       NVME_VF_RES_GRANULARITY);
+            return;
+        }
     }
 }
 
@@ -6346,10 +6423,19 @@  static void nvme_init_state(NvmeCtrl *n)
     NvmePriCtrlCap *cap = &n->pri_ctrl_cap;
     NvmeSecCtrlList *list = &n->sec_ctrl_list;
     NvmeSecCtrlEntry *sctrl;
+    uint8_t max_vfs;
     int i;
 
-    n->conf_ioqpairs = n->params.max_ioqpairs;
-    n->conf_msix_qsize = n->params.msix_qsize;
+    if (pci_is_vf(&n->parent_obj)) {
+        sctrl = nvme_sctrl(n);
+        max_vfs = 0;
+        n->conf_ioqpairs = sctrl->nvq ? le16_to_cpu(sctrl->nvq) - 1 : 0;
+        n->conf_msix_qsize = sctrl->nvi ? le16_to_cpu(sctrl->nvi) : 1;
+    } else {
+        max_vfs = n->params.sriov_max_vfs;
+        n->conf_ioqpairs = n->params.max_ioqpairs;
+        n->conf_msix_qsize = n->params.msix_qsize;
+    }
 
     n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
     n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
@@ -6358,13 +6444,40 @@  static void nvme_init_state(NvmeCtrl *n)
     n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
     n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
 
-    list->numcntl = cpu_to_le16(n->params.sriov_max_vfs);
-    for (i = 0; i < n->params.sriov_max_vfs; i++) {
+    list->numcntl = cpu_to_le16(max_vfs);
+    for (i = 0; i < max_vfs; i++) {
         sctrl = &list->sec[i];
         sctrl->pcid = cpu_to_le16(n->cntlid);
     }
 
     cap->cntlid = cpu_to_le16(n->cntlid);
+    cap->crt = NVME_CRT_VQ | NVME_CRT_VI;
+
+    if (pci_is_vf(&n->parent_obj)) {
+        cap->vqprt = cpu_to_le16(1 + n->conf_ioqpairs);
+    } else {
+        cap->vqprt = cpu_to_le16(1 + n->params.max_ioqpairs -
+                                 n->params.sriov_vq_flexible);
+        cap->vqfrt = cpu_to_le32(n->params.sriov_vq_flexible);
+        cap->vqrfap = cap->vqfrt;
+        cap->vqgran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
+        cap->vqfrsm = n->params.sriov_max_vq_per_vf ?
+                        cpu_to_le16(n->params.sriov_max_vq_per_vf) :
+                        cap->vqprt;
+    }
+
+    if (pci_is_vf(&n->parent_obj)) {
+        cap->viprt = cpu_to_le16(n->conf_msix_qsize);
+    } else {
+        cap->viprt = cpu_to_le16(n->params.msix_qsize -
+                                 n->params.sriov_vi_flexible);
+        cap->vifrt = cpu_to_le32(n->params.sriov_vi_flexible);
+        cap->virfap = cap->vifrt;
+        cap->vigran = cpu_to_le16(NVME_VF_RES_GRANULARITY);
+        cap->vifrsm = n->params.sriov_max_vi_per_vf ?
+                        cpu_to_le16(n->params.sriov_max_vi_per_vf) :
+                        cap->viprt;
+    }
 }
 
 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
@@ -6437,11 +6550,13 @@  static uint64_t nvme_bar_size(unsigned total_queues, unsigned total_irqs,
     return bar_size;
 }
 
-static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset,
-                            uint64_t bar_size)
+static void nvme_init_sriov(NvmeCtrl *n, PCIDevice *pci_dev, uint16_t offset)
 {
     uint16_t vf_dev_id = n->params.use_intel_id ?
                          PCI_DEVICE_ID_INTEL_NVME : PCI_DEVICE_ID_REDHAT_NVME;
+    uint64_t bar_size = nvme_bar_size(n->params.sriov_max_vq_per_vf,
+                                      n->params.sriov_max_vi_per_vf,
+                                      NULL, NULL);
 
     pcie_sriov_pf_init(pci_dev, offset, "nvme", vf_dev_id,
                        n->params.sriov_max_vfs, n->params.sriov_max_vfs,
@@ -6539,7 +6654,7 @@  static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
     }
 
     if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
-        nvme_init_sriov(n, pci_dev, 0x120, bar_size);
+        nvme_init_sriov(n, pci_dev, 0x120);
     }
 
     return 0;
@@ -6563,6 +6678,7 @@  static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     NvmeIdCtrl *id = &n->id_ctrl;
     uint8_t *pci_conf = pci_dev->config;
     uint64_t cap = ldq_le_p(&n->bar.cap);
+    NvmeSecCtrlEntry *sctrl = nvme_sctrl(n);
 
     id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
     id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -6654,6 +6770,10 @@  static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
 
     stl_le_p(&n->bar.vs, NVME_SPEC_VER);
     n->bar.intmc = n->bar.intms = 0;
+
+    if (pci_is_vf(&n->parent_obj) && !sctrl->scs) {
+        stl_le_p(&n->bar.csts, NVME_CSTS_FAILED);
+    }
 }
 
 static int nvme_init_subsys(NvmeCtrl *n, Error **errp)
@@ -6793,6 +6913,10 @@  static Property nvme_props[] = {
     DEFINE_PROP_BOOL("zoned.auto_transition", NvmeCtrl,
                      params.auto_transition_zones, true),
     DEFINE_PROP_UINT8("sriov_max_vfs", NvmeCtrl, params.sriov_max_vfs, 0),
+    DEFINE_PROP_UINT16("sriov_vq_flexible", NvmeCtrl, params.sriov_vq_flexible, 0),
+    DEFINE_PROP_UINT16("sriov_vi_flexible", NvmeCtrl, params.sriov_vi_flexible, 0),
+    DEFINE_PROP_UINT8("sriov_max_vi_per_vf", NvmeCtrl, params.sriov_max_vi_per_vf, 0),
+    DEFINE_PROP_UINT8("sriov_max_vq_per_vf", NvmeCtrl, params.sriov_max_vq_per_vf, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 1401ac3904..bec56b01dd 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -397,6 +397,10 @@  typedef struct NvmeParams {
     bool     auto_transition_zones;
     bool     legacy_cmb;
     uint8_t  sriov_max_vfs;
+    uint16_t sriov_vq_flexible;
+    uint16_t sriov_vi_flexible;
+    uint8_t  sriov_max_vq_per_vf;
+    uint8_t  sriov_max_vi_per_vf;
 } NvmeParams;
 
 typedef struct NvmeCtrl {
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 96595ea8f1..26672d0a31 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -1488,6 +1488,11 @@  typedef struct QEMU_PACKED NvmePriCtrlCap {
     uint8_t     rsvd80[4016];
 } NvmePriCtrlCap;
 
+typedef enum NvmePriCtrlCapCrt {
+    NVME_CRT_VQ             = 1 << 0,
+    NVME_CRT_VI             = 1 << 1,
+} NvmePriCtrlCapCrt;
+
 typedef struct QEMU_PACKED NvmeSecCtrlEntry {
     uint16_t    scid;
     uint16_t    pcid;