[v7,17/26] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET

Message ID 20220308123518.33800-18-xuanzhuo@linux.alibaba.com (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Series: virtio pci support VIRTIO_F_RING_RESET

Checks

Context Check Description
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches (and no cover letter)
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 5 maintainers not CCed: andrii@kernel.org kpsingh@kernel.org kafai@fb.com songliubraving@fb.com yhs@fb.com
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 121 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/tree_selection success Guessing tree name failed - patch did not apply

Commit Message

Xuan Zhuo March 8, 2022, 12:35 p.m. UTC
This patch implements virtio pci support for QUEUE RESET.

Performing reset on a queue is divided into these steps:

 1. virtio_reset_vq()              - notify the device to reset the queue
 2. virtqueue_detach_unused_buf()  - recycle the buffers already submitted
 3. virtqueue_reset_vring()        - reset the vring (may re-allocate it)
 4. virtio_enable_resetq()         - map the vring to the device and enable the queue

This patch implements virtio_reset_vq() and virtio_enable_resetq() for the
PCI transport; a sketch of the intended driver-side flow is included after
the diffstat below.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/virtio/virtio_pci_common.c |  8 +--
 drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
 2 files changed, 88 insertions(+), 3 deletions(-)
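
For illustration, a minimal sketch of how a driver might drive the four
steps above end to end. The four virtio helpers are the ones introduced by
this series, but their exact signatures, the resize argument, and
demo_free_buf() are assumptions here:

static int demo_resize_vq(struct virtqueue *vq, u32 new_num)
{
	void *buf;
	int err;

	/* 1. ask the device to stop and reset the queue */
	err = virtio_reset_vq(vq);
	if (err)
		return err;

	/* 2. recycle the buffers that were still submitted */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		demo_free_buf(buf);	/* hypothetical per-driver teardown */

	/* 3. reset the vring, possibly re-allocating it with a new size */
	err = virtqueue_reset_vring(vq, new_num);
	if (err)
		return err;

	/* 4. map the vring to the device again and re-enable the queue */
	return virtio_enable_resetq(vq);
}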

Comments

Jason Wang March 9, 2022, 8:54 a.m. UTC | #1
On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
> This patch implements virtio pci support for QUEUE RESET.
>
> Performing reset on a queue is divided into these steps:
>
>   1. virtio_reset_vq()              - notify the device to reset the queue
>   2. virtqueue_detach_unused_buf()  - recycle the buffer submitted
>   3. virtqueue_reset_vring()        - reset the vring (may re-alloc)
>   4. virtio_enable_resetq()         - mmap vring to device, and enable the queue
>
> This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> pci scenario.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>   drivers/virtio/virtio_pci_common.c |  8 +--
>   drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
>   2 files changed, 88 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index fdbde1db5ec5..863d3a8a0956 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
>   	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
>   	unsigned long flags;
>   
> -	spin_lock_irqsave(&vp_dev->lock, flags);
> -	list_del(&info->node);
> -	spin_unlock_irqrestore(&vp_dev->lock, flags);
> +	if (!vq->reset) {
> +		spin_lock_irqsave(&vp_dev->lock, flags);
> +		list_del(&info->node);
> +		spin_unlock_irqrestore(&vp_dev->lock, flags);
> +	}
>   
>   	vp_dev->del_vq(info);
>   	kfree(info);
> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> index 49a4493732cf..3c67d3607802 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
>   	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
>   			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
>   		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> +
> +	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> +		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
>   }
>   
>   /* virtio config->finalize_features() implementation */
> @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
>   	return 0;
>   }
>   
> +static int vp_modern_reset_vq(struct virtqueue *vq)
> +{
> +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> +	struct virtio_pci_vq_info *info;
> +	unsigned long flags;
> +	unsigned int irq;
> +
> +	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> +		return -ENOENT;
> +
> +	vp_modern_set_queue_reset(mdev, vq->index);
> +
> +	info = vp_dev->vqs[vq->index];
> +
> +	/* delete vq from irq handler */
> +	spin_lock_irqsave(&vp_dev->lock, flags);
> +	list_del(&info->node);
> +	spin_unlock_irqrestore(&vp_dev->lock, flags);
> +
> +	INIT_LIST_HEAD(&info->node);
> +
> +	vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
> +
> +	/* sync irq callback. */
> +	if (vp_dev->intx_enabled) {
> +		irq = vp_dev->pci_dev->irq;
> +
> +	} else {
> +		if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
> +			return 0;
> +
> +		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
> +	}
> +
> +	synchronize_irq(irq);


synchronize_irq() is not sufficient here since it breaks the effort of
the interrupt hardening done by these commits:

080cd7c3ac87 virtio-pci: harden INTX interrupts
9e35276a5344 virtio_pci: harden MSI-X interrupts

Unfortunately, 080cd7c3ac87 introduced an issue where disable_irq() was
used for affinity-managed irqs, but we're discussing a fix.
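
To make the distinction concrete, a small sketch; the demo_* wrappers are
hypothetical, while synchronize_irq()/disable_irq()/enable_irq() are the
standard kernel IRQ API:

/* Waits only for handlers already in flight; the vector stays unmasked,
 * so the device can raise it again immediately afterwards, which defeats
 * the hardening done by the commits above.
 */
static void demo_quiesce_vq_irq_weak(unsigned int irq)
{
	synchronize_irq(irq);
}

/* Masks the vector and waits for in-flight handlers; the vq callback
 * cannot run again until a matching enable_irq() when the queue is
 * re-enabled.
 */
static void demo_quiesce_vq_irq_strong(unsigned int irq)
{
	disable_irq(irq);
}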


> +
> +	return 0;
> +}
> +
> +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
> +{
> +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> +	struct virtio_pci_vq_info *info;
> +	unsigned long flags, index;
> +	int err;
> +
> +	if (vq->reset != VIRTIO_VQ_RESET_STEP_VRING_ATTACH)
> +		return -EBUSY;
> +
> +	index = vq->index;
> +	info = vp_dev->vqs[index];
> +
> +	/* check queue reset status */
> +	if (vp_modern_get_queue_reset(mdev, index) != 1)
> +		return -EBUSY;
> +
> +	err = vp_active_vq(vq, info->msix_vector);
> +	if (err)
> +		return err;
> +
> +	if (vq->callback) {
> +		spin_lock_irqsave(&vp_dev->lock, flags);
> +		list_add(&info->node, &vp_dev->virtqueues);
> +		spin_unlock_irqrestore(&vp_dev->lock, flags);
> +	} else {
> +		INIT_LIST_HEAD(&info->node);
> +	}
> +
> +	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);


Any reason we need to check queue_enable() here?

Thanks


> +	vq->reset = VIRTIO_VQ_RESET_STEP_NONE;
> +
> +	return 0;
> +}
> +
>   static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
>   {
>   	return vp_modern_config_vector(&vp_dev->mdev, vector);
> @@ -407,6 +486,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
>   	.set_vq_affinity = vp_set_vq_affinity,
>   	.get_vq_affinity = vp_get_vq_affinity,
>   	.get_shm_region  = vp_get_shm_region,
> +	.reset_vq	 = vp_modern_reset_vq,
> +	.enable_reset_vq = vp_modern_enable_reset_vq,
>   };
>   
>   static const struct virtio_config_ops virtio_pci_config_ops = {
> @@ -425,6 +506,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
>   	.set_vq_affinity = vp_set_vq_affinity,
>   	.get_vq_affinity = vp_get_vq_affinity,
>   	.get_shm_region  = vp_get_shm_region,
> +	.reset_vq	 = vp_modern_reset_vq,
> +	.enable_reset_vq = vp_modern_enable_reset_vq,
>   };
>   
>   /* the PCI probing function */
Xuan Zhuo March 9, 2022, 9:32 a.m. UTC | #2
On Wed, 9 Mar 2022 16:54:10 +0800, Jason Wang <jasowang@redhat.com> wrote:
>
> On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
> > This patch implements virtio pci support for QUEUE RESET.
> >
> > Performing reset on a queue is divided into these steps:
> >
> >   1. virtio_reset_vq()              - notify the device to reset the queue
> >   2. virtqueue_detach_unused_buf()  - recycle the buffer submitted
> >   3. virtqueue_reset_vring()        - reset the vring (may re-alloc)
> >   4. virtio_enable_resetq()         - mmap vring to device, and enable the queue
> >
> > This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> > pci scenario.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >   drivers/virtio/virtio_pci_common.c |  8 +--
> >   drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
> >   2 files changed, 88 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > index fdbde1db5ec5..863d3a8a0956 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
> >   	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> >   	unsigned long flags;
> >
> > -	spin_lock_irqsave(&vp_dev->lock, flags);
> > -	list_del(&info->node);
> > -	spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +	if (!vq->reset) {
> > +		spin_lock_irqsave(&vp_dev->lock, flags);
> > +		list_del(&info->node);
> > +		spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +	}
> >
> >   	vp_dev->del_vq(info);
> >   	kfree(info);
> > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > index 49a4493732cf..3c67d3607802 100644
> > --- a/drivers/virtio/virtio_pci_modern.c
> > +++ b/drivers/virtio/virtio_pci_modern.c
> > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> >   	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> >   			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> >   		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > +
> > +	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > +		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> >   }
> >
> >   /* virtio config->finalize_features() implementation */
> > @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
> >   	return 0;
> >   }
> >
> > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > +{
> > +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +	struct virtio_pci_vq_info *info;
> > +	unsigned long flags;
> > +	unsigned int irq;
> > +
> > +	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > +		return -ENOENT;
> > +
> > +	vp_modern_set_queue_reset(mdev, vq->index);
> > +
> > +	info = vp_dev->vqs[vq->index];
> > +
> > +	/* delete vq from irq handler */
> > +	spin_lock_irqsave(&vp_dev->lock, flags);
> > +	list_del(&info->node);
> > +	spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +
> > +	INIT_LIST_HEAD(&info->node);
> > +
> > +	vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
> > +
> > +	/* sync irq callback. */
> > +	if (vp_dev->intx_enabled) {
> > +		irq = vp_dev->pci_dev->irq;
> > +
> > +	} else {
> > +		if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
> > +			return 0;
> > +
> > +		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
> > +	}
> > +
> > +	synchronize_irq(irq);
>
>
> Synchronize_irq() is not sufficient here since it breaks the effort of
> the interrupt hardening which is done by commits:
>
> 080cd7c3ac87 virtio-pci: harden INTX interrupts
> 9e35276a5344 virtio_pci: harden MSI-X interrupts
>
> Unfortunately  080cd7c3ac87 introduces an issue that disable_irq() were
> used for the affinity managed irq but we're discussing a fix.

I need to understand it first.

>
>
> > +
> > +	return 0;
> > +}
> > +
> > +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
> > +{
> > +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +	struct virtio_pci_vq_info *info;
> > +	unsigned long flags, index;
> > +	int err;
> > +
> > +	if (vq->reset != VIRTIO_VQ_RESET_STEP_VRING_ATTACH)
> > +		return -EBUSY;
> > +
> > +	index = vq->index;
> > +	info = vp_dev->vqs[index];
> > +
> > +	/* check queue reset status */
> > +	if (vp_modern_get_queue_reset(mdev, index) != 1)
> > +		return -EBUSY;
> > +
> > +	err = vp_active_vq(vq, info->msix_vector);
> > +	if (err)
> > +		return err;
> > +
> > +	if (vq->callback) {
> > +		spin_lock_irqsave(&vp_dev->lock, flags);
> > +		list_add(&info->node, &vp_dev->virtqueues);
> > +		spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +	} else {
> > +		INIT_LIST_HEAD(&info->node);
> > +	}
> > +
> > +	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
>
>
> Any reason we need to check queue_enable() here?

The purpose of this function is to enable a previously reset vq, so we call
queue_enable() to activate it.
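
For reference, the helper is just two register writes; this is the mainline
implementation in drivers/virtio/virtio_pci_modern_dev.c (abbreviated):

void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	/* select the queue, then write to its queue_enable field */
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}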

Thanks.

>
> Thanks
>
>
> > +	vq->reset = VIRTIO_VQ_RESET_STEP_NONE;
> > +
> > +	return 0;
> > +}
> > +
> >   static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
> >   {
> >   	return vp_modern_config_vector(&vp_dev->mdev, vector);
> > @@ -407,6 +486,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
> >   	.set_vq_affinity = vp_set_vq_affinity,
> >   	.get_vq_affinity = vp_get_vq_affinity,
> >   	.get_shm_region  = vp_get_shm_region,
> > +	.reset_vq	 = vp_modern_reset_vq,
> > +	.enable_reset_vq = vp_modern_enable_reset_vq,
> >   };
> >
> >   static const struct virtio_config_ops virtio_pci_config_ops = {
> > @@ -425,6 +506,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
> >   	.set_vq_affinity = vp_set_vq_affinity,
> >   	.get_vq_affinity = vp_get_vq_affinity,
> >   	.get_shm_region  = vp_get_shm_region,
> > +	.reset_vq	 = vp_modern_reset_vq,
> > +	.enable_reset_vq = vp_modern_enable_reset_vq,
> >   };
> >
> >   /* the PCI probing function */
>
Xuan Zhuo March 10, 2022, 8:20 a.m. UTC | #3
On Wed, 9 Mar 2022 16:54:10 +0800, Jason Wang <jasowang@redhat.com> wrote:
>
> On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
> > This patch implements virtio pci support for QUEUE RESET.
> >
> > Performing reset on a queue is divided into these steps:
> >
> >   1. virtio_reset_vq()              - notify the device to reset the queue
> >   2. virtqueue_detach_unused_buf()  - recycle the buffer submitted
> >   3. virtqueue_reset_vring()        - reset the vring (may re-alloc)
> >   4. virtio_enable_resetq()         - mmap vring to device, and enable the queue
> >
> > This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
> > pci scenario.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >   drivers/virtio/virtio_pci_common.c |  8 +--
> >   drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
> >   2 files changed, 88 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> > index fdbde1db5ec5..863d3a8a0956 100644
> > --- a/drivers/virtio/virtio_pci_common.c
> > +++ b/drivers/virtio/virtio_pci_common.c
> > @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
> >   	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
> >   	unsigned long flags;
> >
> > -	spin_lock_irqsave(&vp_dev->lock, flags);
> > -	list_del(&info->node);
> > -	spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +	if (!vq->reset) {
> > +		spin_lock_irqsave(&vp_dev->lock, flags);
> > +		list_del(&info->node);
> > +		spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +	}
> >
> >   	vp_dev->del_vq(info);
> >   	kfree(info);
> > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > index 49a4493732cf..3c67d3607802 100644
> > --- a/drivers/virtio/virtio_pci_modern.c
> > +++ b/drivers/virtio/virtio_pci_modern.c
> > @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
> >   	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
> >   			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
> >   		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
> > +
> > +	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
> > +		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
> >   }
> >
> >   /* virtio config->finalize_features() implementation */
> > @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
> >   	return 0;
> >   }
> >
> > +static int vp_modern_reset_vq(struct virtqueue *vq)
> > +{
> > +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> > +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> > +	struct virtio_pci_vq_info *info;
> > +	unsigned long flags;
> > +	unsigned int irq;
> > +
> > +	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> > +		return -ENOENT;
> > +
> > +	vp_modern_set_queue_reset(mdev, vq->index);
> > +
> > +	info = vp_dev->vqs[vq->index];
> > +
> > +	/* delete vq from irq handler */
> > +	spin_lock_irqsave(&vp_dev->lock, flags);
> > +	list_del(&info->node);
> > +	spin_unlock_irqrestore(&vp_dev->lock, flags);
> > +
> > +	INIT_LIST_HEAD(&info->node);
> > +
> > +	vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
> > +
> > +	/* sync irq callback. */
> > +	if (vp_dev->intx_enabled) {
> > +		irq = vp_dev->pci_dev->irq;
> > +
> > +	} else {
> > +		if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
> > +			return 0;
> > +
> > +		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
> > +	}
> > +
> > +	synchronize_irq(irq);
>
>
> Synchronize_irq() is not sufficient here since it breaks the effort of
> the interrupt hardening which is done by commits:
>
> 080cd7c3ac87 virtio-pci: harden INTX interrupts
> 9e35276a5344 virtio_pci: harden MSI-X interrupts
>
> Unfortunately  080cd7c3ac87 introduces an issue that disable_irq() were
> used for the affinity managed irq but we're discussing a fix.
>


OK, I think disable_irq() should still be used here.

I want to settle the solution for this detail first, so I am posting the
code below; I hope Jason can help confirm this point first.

There are three situations in which a vq corresponds to an interrupt:

1. intx
2. msix: per-vq vectors
3. msix: shared irq

Essentially these fall into two categories: per-vq vectors and shared irq.

The shared-irq case looks up the vq from the virtqueues list, so I think it
is safe as long as list_del() is executed under the protection of the lock
(see the handler quoted below).

In the case of per-vq vectors, disable_irq() is used.
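
For reference, the shared-irq handler in drivers/virtio/virtio_pci_common.c
walks exactly this list under the same lock, which is why list_del() under
the lock is sufficient there (mainline code, abbreviated):

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}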

Thanks.

+static int vp_modern_reset_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+       struct virtio_pci_vq_info *info;
+       unsigned long flags;
+
+       if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+               return -ENOENT;
+
+       vp_modern_set_queue_reset(mdev, vq->index);
+
+       info = vp_dev->vqs[vq->index];
+
+       /* delete vq from irq handler */
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_del(&info->node);
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+       INIT_LIST_HEAD(&info->node);
+
+       /* For the case where the vq has an exclusive irq, call disable_irq()
+        * to prevent the irq from being received again and to flush any
+        * pending irq.
+        *
+        * In the shared-interrupt scenario, the vq is looked up from the
+        * virtqueues list. Since the previous list_del() removed it from
+        * that list, the vq callback cannot be invoked anymore, so there
+        * is no need to disable the corresponding interrupt.
+        */
+       if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+               disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+       vq->reset = true;
+
+       return 0;
+}
+
+static int vp_modern_enable_reset_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+       struct virtio_pci_vq_info *info;
+       unsigned long flags, index;
+       int err;
+
+       if (!vq->reset)
+               return -EBUSY;
+
+       index = vq->index;
+       info = vp_dev->vqs[index];
+
+       /* check queue reset status */
+       if (vp_modern_get_queue_reset(mdev, index) != 1)
+               return -EBUSY;
+
+       err = vp_active_vq(vq, info->msix_vector);
+       if (err)
+               return err;
+
+       if (vq->callback) {
+               spin_lock_irqsave(&vp_dev->lock, flags);
+               list_add(&info->node, &vp_dev->virtqueues);
+               spin_unlock_irqrestore(&vp_dev->lock, flags);
+       } else {
+               INIT_LIST_HEAD(&info->node);
+       }
+
+       vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+       vq->reset = false;
+
+       if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
+               enable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
+
+       return 0;
+}
Jason Wang March 11, 2022, 5:05 a.m. UTC | #4
On 2022/3/9 5:32 PM, Xuan Zhuo wrote:
> On Wed, 9 Mar 2022 16:54:10 +0800, Jason Wang <jasowang@redhat.com> wrote:
>> On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
>>> This patch implements virtio pci support for QUEUE RESET.
>>>
>>> Performing reset on a queue is divided into these steps:
>>>
>>>    1. virtio_reset_vq()              - notify the device to reset the queue
>>>    2. virtqueue_detach_unused_buf()  - recycle the buffer submitted
>>>    3. virtqueue_reset_vring()        - reset the vring (may re-alloc)
>>>    4. virtio_enable_resetq()         - mmap vring to device, and enable the queue
>>>
>>> This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
>>> pci scenario.
>>>
>>> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>>> ---
>>>    drivers/virtio/virtio_pci_common.c |  8 +--
>>>    drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
>>>    2 files changed, 88 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>>> index fdbde1db5ec5..863d3a8a0956 100644
>>> --- a/drivers/virtio/virtio_pci_common.c
>>> +++ b/drivers/virtio/virtio_pci_common.c
>>> @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
>>>    	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
>>>    	unsigned long flags;
>>>
>>> -	spin_lock_irqsave(&vp_dev->lock, flags);
>>> -	list_del(&info->node);
>>> -	spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +	if (!vq->reset) {
>>> +		spin_lock_irqsave(&vp_dev->lock, flags);
>>> +		list_del(&info->node);
>>> +		spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +	}
>>>
>>>    	vp_dev->del_vq(info);
>>>    	kfree(info);
>>> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>>> index 49a4493732cf..3c67d3607802 100644
>>> --- a/drivers/virtio/virtio_pci_modern.c
>>> +++ b/drivers/virtio/virtio_pci_modern.c
>>> @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
>>>    	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
>>>    			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
>>>    		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
>>> +
>>> +	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
>>> +		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
>>>    }
>>>
>>>    /* virtio config->finalize_features() implementation */
>>> @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
>>>    	return 0;
>>>    }
>>>
>>> +static int vp_modern_reset_vq(struct virtqueue *vq)
>>> +{
>>> +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>>> +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
>>> +	struct virtio_pci_vq_info *info;
>>> +	unsigned long flags;
>>> +	unsigned int irq;
>>> +
>>> +	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
>>> +		return -ENOENT;
>>> +
>>> +	vp_modern_set_queue_reset(mdev, vq->index);
>>> +
>>> +	info = vp_dev->vqs[vq->index];
>>> +
>>> +	/* delete vq from irq handler */
>>> +	spin_lock_irqsave(&vp_dev->lock, flags);
>>> +	list_del(&info->node);
>>> +	spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +
>>> +	INIT_LIST_HEAD(&info->node);
>>> +
>>> +	vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
>>> +
>>> +	/* sync irq callback. */
>>> +	if (vp_dev->intx_enabled) {
>>> +		irq = vp_dev->pci_dev->irq;
>>> +
>>> +	} else {
>>> +		if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
>>> +			return 0;
>>> +
>>> +		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
>>> +	}
>>> +
>>> +	synchronize_irq(irq);
>>
>> Synchronize_irq() is not sufficient here since it breaks the effort of
>> the interrupt hardening which is done by commits:
>>
>> 080cd7c3ac87 virtio-pci: harden INTX interrupts
>> 9e35276a5344 virtio_pci: harden MSI-X interrupts
>>
>> Unfortunately  080cd7c3ac87 introduces an issue that disable_irq() were
>> used for the affinity managed irq but we're discussing a fix.
> I need to understand it first.
>
>>
>>> +
>>> +	return 0;
>>> +}
>>> +
>>> +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
>>> +{
>>> +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>>> +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
>>> +	struct virtio_pci_vq_info *info;
>>> +	unsigned long flags, index;
>>> +	int err;
>>> +
>>> +	if (vq->reset != VIRTIO_VQ_RESET_STEP_VRING_ATTACH)
>>> +		return -EBUSY;
>>> +
>>> +	index = vq->index;
>>> +	info = vp_dev->vqs[index];
>>> +
>>> +	/* check queue reset status */
>>> +	if (vp_modern_get_queue_reset(mdev, index) != 1)
>>> +		return -EBUSY;
>>> +
>>> +	err = vp_active_vq(vq, info->msix_vector);
>>> +	if (err)
>>> +		return err;
>>> +
>>> +	if (vq->callback) {
>>> +		spin_lock_irqsave(&vp_dev->lock, flags);
>>> +		list_add(&info->node, &vp_dev->virtqueues);
>>> +		spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +	} else {
>>> +		INIT_LIST_HEAD(&info->node);
>>> +	}
>>> +
>>> +	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
>>
>> Any reason we need to check queue_enable() here?
> The purpose of this function is to enable a reset vq, so call queue_enable() to
> activate it.


OK, this is what the spec mandates.

Thanks


>
> Thanks.
>
>> Thanks
>>
>>
>>> +	vq->reset = VIRTIO_VQ_RESET_STEP_NONE;
>>> +
>>> +	return 0;
>>> +}
>>> +
>>>    static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
>>>    {
>>>    	return vp_modern_config_vector(&vp_dev->mdev, vector);
>>> @@ -407,6 +486,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
>>>    	.set_vq_affinity = vp_set_vq_affinity,
>>>    	.get_vq_affinity = vp_get_vq_affinity,
>>>    	.get_shm_region  = vp_get_shm_region,
>>> +	.reset_vq	 = vp_modern_reset_vq,
>>> +	.enable_reset_vq = vp_modern_enable_reset_vq,
>>>    };
>>>
>>>    static const struct virtio_config_ops virtio_pci_config_ops = {
>>> @@ -425,6 +506,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
>>>    	.set_vq_affinity = vp_set_vq_affinity,
>>>    	.get_vq_affinity = vp_get_vq_affinity,
>>>    	.get_shm_region  = vp_get_shm_region,
>>> +	.reset_vq	 = vp_modern_reset_vq,
>>> +	.enable_reset_vq = vp_modern_enable_reset_vq,
>>>    };
>>>
>>>    /* the PCI probing function */
Jason Wang March 11, 2022, 5:09 a.m. UTC | #5
On 2022/3/10 4:20 PM, Xuan Zhuo wrote:
> On Wed, 9 Mar 2022 16:54:10 +0800, Jason Wang <jasowang@redhat.com> wrote:
>> On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
>>> This patch implements virtio pci support for QUEUE RESET.
>>>
>>> Performing reset on a queue is divided into these steps:
>>>
>>>    1. virtio_reset_vq()              - notify the device to reset the queue
>>>    2. virtqueue_detach_unused_buf()  - recycle the buffer submitted
>>>    3. virtqueue_reset_vring()        - reset the vring (may re-alloc)
>>>    4. virtio_enable_resetq()         - mmap vring to device, and enable the queue
>>>
>>> This patch implements virtio_reset_vq(), virtio_enable_resetq() in the
>>> pci scenario.
>>>
>>> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>>> ---
>>>    drivers/virtio/virtio_pci_common.c |  8 +--
>>>    drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
>>>    2 files changed, 88 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>>> index fdbde1db5ec5..863d3a8a0956 100644
>>> --- a/drivers/virtio/virtio_pci_common.c
>>> +++ b/drivers/virtio/virtio_pci_common.c
>>> @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
>>>    	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
>>>    	unsigned long flags;
>>>
>>> -	spin_lock_irqsave(&vp_dev->lock, flags);
>>> -	list_del(&info->node);
>>> -	spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +	if (!vq->reset) {
>>> +		spin_lock_irqsave(&vp_dev->lock, flags);
>>> +		list_del(&info->node);
>>> +		spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +	}
>>>
>>>    	vp_dev->del_vq(info);
>>>    	kfree(info);
>>> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>>> index 49a4493732cf..3c67d3607802 100644
>>> --- a/drivers/virtio/virtio_pci_modern.c
>>> +++ b/drivers/virtio/virtio_pci_modern.c
>>> @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
>>>    	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
>>>    			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
>>>    		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
>>> +
>>> +	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
>>> +		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
>>>    }
>>>
>>>    /* virtio config->finalize_features() implementation */
>>> @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
>>>    	return 0;
>>>    }
>>>
>>> +static int vp_modern_reset_vq(struct virtqueue *vq)
>>> +{
>>> +	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>>> +	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
>>> +	struct virtio_pci_vq_info *info;
>>> +	unsigned long flags;
>>> +	unsigned int irq;
>>> +
>>> +	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
>>> +		return -ENOENT;
>>> +
>>> +	vp_modern_set_queue_reset(mdev, vq->index);
>>> +
>>> +	info = vp_dev->vqs[vq->index];
>>> +
>>> +	/* delete vq from irq handler */
>>> +	spin_lock_irqsave(&vp_dev->lock, flags);
>>> +	list_del(&info->node);
>>> +	spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +
>>> +	INIT_LIST_HEAD(&info->node);
>>> +
>>> +	vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
>>> +
>>> +	/* sync irq callback. */
>>> +	if (vp_dev->intx_enabled) {
>>> +		irq = vp_dev->pci_dev->irq;
>>> +
>>> +	} else {
>>> +		if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
>>> +			return 0;
>>> +
>>> +		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
>>> +	}
>>> +
>>> +	synchronize_irq(irq);
>>
>> Synchronize_irq() is not sufficient here since it breaks the effort of
>> the interrupt hardening which is done by commits:
>>
>> 080cd7c3ac87 virtio-pci: harden INTX interrupts
>> 9e35276a5344 virtio_pci: harden MSI-X interrupts
>>
>> Unfortunately  080cd7c3ac87 introduces an issue that disable_irq() were
>> used for the affinity managed irq but we're discussing a fix.
>>
>
> ok, I think disable_irq() is still used here.
>
> I want to determine the solution for this detail first. So I posted the code, I
> hope Jason can help confirm this point first.
>
> There are three situations in which vq corresponds to an interrupt
>
> 1. intx
> 2. msix: per vq vectors
> 2. msix: share irq
>
> Essentially can be divided into two categories: per vq vectors and share irq.
>
> For share irq is based on virtqueues to find vq, so I think it is safe as long
> as list_del() is executed under the protection of the lock.
>
> In the case of per vq vectors, disable_irq() is used.


See the discussion here [1]: disable_irq() could be problematic for
block and scsi devices that use affinity-managed irqs. We're waiting for
the IRQ maintainer to comment on a solution. Otherwise this looks sane.

Thanks

[1] https://lkml.org/lkml/2022/3/8/743
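
Purely as an illustration of the tension (not a settled fix, and not
necessarily what the thread converges on): keep disable_irq() for ordinary
vectors and fall back to synchronize_irq() for affinity-managed ones.
irq_get_irq_data() and irqd_affinity_is_managed() are existing kernel
helpers; whether this is the right policy is exactly what [1] discusses:

static void demo_quiesce_irq(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	/* disable_irq() on a managed vector conflicts with the managed
	 * shutdown/startup done on CPU hotplug, hence the weaker fallback.
	 */
	if (d && irqd_affinity_is_managed(d))
		synchronize_irq(irq);
	else
		disable_irq(irq);
}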


>
> Thanks.
>
> +static int vp_modern_reset_vq(struct virtqueue *vq)
> +{
> +       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> +       struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> +       struct virtio_pci_vq_info *info;
> +       unsigned long flags;
> +       unsigned int irq;
> +
> +       if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
> +               return -ENOENT;
> +
> +       vp_modern_set_queue_reset(mdev, vq->index);
> +
> +       info = vp_dev->vqs[vq->index];
> +
> +       /* delete vq from irq handler */
> +       spin_lock_irqsave(&vp_dev->lock, flags);
> +       list_del(&info->node);
> +       vp_modern_set_queue_reset(mdev, vq->index);
> +
> +       info = vp_dev->vqs[vq->index];
> +
> +       /* delete vq from irq handler */
> +       spin_lock_irqsave(&vp_dev->lock, flags);
> +       list_del(&info->node);
> +       spin_unlock_irqrestore(&vp_dev->lock, flags);
> +
> +       INIT_LIST_HEAD(&info->node);
> +
> +       /* For the case where vq has an exclusive irq, to prevent the irq from
> +        * being received again and the pending irq, call disable_irq().
> +        *
> +        * In the scenario based on shared interrupts, vq will be searched from
> +        * the queue virtqueues. Since the previous list_del() has been deleted
> +        * from the queue, it is impossible for vq to be called in this case.
> +        * There is no need to close the corresponding interrupt.
> +        */
> +       if (vp_dev->per_vq_vectors && msix_vec != VIRTIO_MSI_NO_VECTOR)
> +               disable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
> +
> +       vq->reset = true;
> +
> +       return 0;
> +}
> +
> +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
> +{
> +       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> +       struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
> +       struct virtio_pci_vq_info *info;
> +       unsigned long flags, index;
> +       int err;
> +
> +       if (!vq->reset)
> +               return -EBUSY;
> +
> +       index = vq->index;
> +       info = vp_dev->vqs[index];
> +
> +       /* check queue reset status */
> +       if (vp_modern_get_queue_reset(mdev, index) != 1)
> +               return -EBUSY;
> +
> +       err = vp_active_vq(vq, info->msix_vector);
> +       if (err)
> +               return err;
> +
> +       if (vq->callback) {
> +               spin_lock_irqsave(&vp_dev->lock, flags);
> +               list_add(&info->node, &vp_dev->virtqueues);
> +               spin_unlock_irqrestore(&vp_dev->lock, flags);
> +       } else {
> +               INIT_LIST_HEAD(&info->node);
> +       }
> +
> +       vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
> +       vq->reset = false;
> +
> +       if (vp_dev->per_vq_vectors && msix_vec != VIRTIO_MSI_NO_VECTOR)
> +               enable_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
> +
> +       return 0;
> +}
>
>
Patch

diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index fdbde1db5ec5..863d3a8a0956 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -248,9 +248,11 @@  static void vp_del_vq(struct virtqueue *vq)
 	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
 	unsigned long flags;
 
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_del(&info->node);
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
+	if (!vq->reset) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_del(&info->node);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	}
 
 	vp_dev->del_vq(info);
 	kfree(info);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 49a4493732cf..3c67d3607802 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -34,6 +34,9 @@  static void vp_transport_features(struct virtio_device *vdev, u64 features)
 	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
 			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
 		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
+
+	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
+		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
 }
 
 /* virtio config->finalize_features() implementation */
@@ -199,6 +202,82 @@  static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
 	return 0;
 }
 
+static int vp_modern_reset_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+	struct virtio_pci_vq_info *info;
+	unsigned long flags;
+	unsigned int irq;
+
+	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
+		return -ENOENT;
+
+	vp_modern_set_queue_reset(mdev, vq->index);
+
+	info = vp_dev->vqs[vq->index];
+
+	/* delete vq from irq handler */
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_del(&info->node);
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+	INIT_LIST_HEAD(&info->node);
+
+	vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
+
+	/* sync irq callback. */
+	if (vp_dev->intx_enabled) {
+		irq = vp_dev->pci_dev->irq;
+
+	} else {
+		if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
+			return 0;
+
+		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
+	}
+
+	synchronize_irq(irq);
+
+	return 0;
+}
+
+static int vp_modern_enable_reset_vq(struct virtqueue *vq)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
+	struct virtio_pci_vq_info *info;
+	unsigned long flags, index;
+	int err;
+
+	if (vq->reset != VIRTIO_VQ_RESET_STEP_VRING_ATTACH)
+		return -EBUSY;
+
+	index = vq->index;
+	info = vp_dev->vqs[index];
+
+	/* check queue reset status */
+	if (vp_modern_get_queue_reset(mdev, index) != 1)
+		return -EBUSY;
+
+	err = vp_active_vq(vq, info->msix_vector);
+	if (err)
+		return err;
+
+	if (vq->callback) {
+		spin_lock_irqsave(&vp_dev->lock, flags);
+		list_add(&info->node, &vp_dev->virtqueues);
+		spin_unlock_irqrestore(&vp_dev->lock, flags);
+	} else {
+		INIT_LIST_HEAD(&info->node);
+	}
+
+	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
+	vq->reset = VIRTIO_VQ_RESET_STEP_NONE;
+
+	return 0;
+}
+
 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 {
 	return vp_modern_config_vector(&vp_dev->mdev, vector);
@@ -407,6 +486,8 @@  static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
 	.set_vq_affinity = vp_set_vq_affinity,
 	.get_vq_affinity = vp_get_vq_affinity,
 	.get_shm_region  = vp_get_shm_region,
+	.reset_vq	 = vp_modern_reset_vq,
+	.enable_reset_vq = vp_modern_enable_reset_vq,
 };
 
 static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -425,6 +506,8 @@  static const struct virtio_config_ops virtio_pci_config_ops = {
 	.set_vq_affinity = vp_set_vq_affinity,
 	.get_vq_affinity = vp_get_vq_affinity,
 	.get_shm_region  = vp_get_shm_region,
+	.reset_vq	 = vp_modern_reset_vq,
+	.enable_reset_vq = vp_modern_enable_reset_vq,
 };
 
 /* the PCI probing function */