Message ID | 20230921124040.145386-2-yishaih@nvidia.com (mailing list archive)
---|---
State | New, archived
Series | Introduce a vfio driver over virtio devices
On Thu, Sep 21, 2023 at 03:40:30PM +0300, Yishai Hadas wrote:
> From: Feng Liu <feliu@nvidia.com>
>
> Currently VQ deletion callback vp_del_vqs() processes generic
> virtio_device level VQ list instead of VQ information available at PCI
> layer.
>
> To adhere to the layering, use the pci device level VQ information
> stored in the virtqueues or vqs.
>
> This also prepares the code to handle PCI layer admin vq life cycle to
> be managed within the pci layer and thereby avoid undesired deletion of
> admin vq by upper layer drivers (net, console, vfio), in the del_vqs()
> callback.
>
> Signed-off-by: Feng Liu <feliu@nvidia.com>
> Reviewed-by: Parav Pandit <parav@nvidia.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
> ---
>  drivers/virtio/virtio_pci_common.c | 12 +++++++++---
>  drivers/virtio/virtio_pci_common.h |  1 +
>  2 files changed, 10 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index c2524a7207cf..7a3e6edc4dd6 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -232,12 +232,16 @@ static void vp_del_vq(struct virtqueue *vq)
>  void vp_del_vqs(struct virtio_device *vdev)
>  {
>  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> -	struct virtqueue *vq, *n;
> +	struct virtqueue *vq;
>  	int i;
>
> -	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
> +	for (i = 0; i < vp_dev->nvqs; i++) {
> +		if (!vp_dev->vqs[i])
> +			continue;
> +
> +		vq = vp_dev->vqs[i]->vq;
>  		if (vp_dev->per_vq_vectors) {
> -			int v = vp_dev->vqs[vq->index]->msix_vector;
> +			int v = vp_dev->vqs[i]->msix_vector;
>
>  			if (v != VIRTIO_MSI_NO_VECTOR) {
>  				int irq = pci_irq_vector(vp_dev->pci_dev, v);
> @@ -294,6 +298,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
>  	if (!vp_dev->vqs)
>  		return -ENOMEM;
> +	vp_dev->nvqs = nvqs;
>
>  	if (per_vq_vectors) {
>  		/* Best option: one for change interrupt, one per vq. */
> @@ -365,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
>  	if (!vp_dev->vqs)
>  		return -ENOMEM;
> +	vp_dev->nvqs = nvqs;
>
>  	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
>  			  dev_name(&vdev->dev), vp_dev);
> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> index 4b773bd7c58c..602021967aaa 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -60,6 +60,7 @@ struct virtio_pci_device {
>
>  	/* array of all queues for house-keeping */
>  	struct virtio_pci_vq_info **vqs;
> +	u32 nvqs;

I don't much like it that we are adding more duplicated info here.
In fact, we tried removing the vqs array in
5c34d002dcc7a6dd665a19d098b4f4cd5501ba1a - there was some bug in that
patch and the author didn't have the time to debug
so I reverted but I don't really think we need to add to that.

>
>  	/* MSI-X support */
>  	int msix_enabled;
> --
> 2.27.0
On 2023-09-21 a.m.9:46, Michael S. Tsirkin wrote:
> External email: Use caution opening links or attachments
>
>
> On Thu, Sep 21, 2023 at 03:40:30PM +0300, Yishai Hadas wrote:
>> From: Feng Liu <feliu@nvidia.com>
>>
>> Currently VQ deletion callback vp_del_vqs() processes generic
>> virtio_device level VQ list instead of VQ information available at PCI
>> layer.
>>
>> To adhere to the layering, use the pci device level VQ information
>> stored in the virtqueues or vqs.
>>
>> This also prepares the code to handle PCI layer admin vq life cycle to
>> be managed within the pci layer and thereby avoid undesired deletion of
>> admin vq by upper layer drivers (net, console, vfio), in the del_vqs()
>> callback.
>
>> Signed-off-by: Feng Liu <feliu@nvidia.com>
>> Reviewed-by: Parav Pandit <parav@nvidia.com>
>> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
>> Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
>> ---
>>  drivers/virtio/virtio_pci_common.c | 12 +++++++++---
>>  drivers/virtio/virtio_pci_common.h |  1 +
>>  2 files changed, 10 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>> index c2524a7207cf..7a3e6edc4dd6 100644
>> --- a/drivers/virtio/virtio_pci_common.c
>> +++ b/drivers/virtio/virtio_pci_common.c
>> @@ -232,12 +232,16 @@ static void vp_del_vq(struct virtqueue *vq)
>>  void vp_del_vqs(struct virtio_device *vdev)
>>  {
>>  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>> -	struct virtqueue *vq, *n;
>> +	struct virtqueue *vq;
>>  	int i;
>>
>> -	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
>> +	for (i = 0; i < vp_dev->nvqs; i++) {
>> +		if (!vp_dev->vqs[i])
>> +			continue;
>> +
>> +		vq = vp_dev->vqs[i]->vq;
>>  		if (vp_dev->per_vq_vectors) {
>> -			int v = vp_dev->vqs[vq->index]->msix_vector;
>> +			int v = vp_dev->vqs[i]->msix_vector;
>>
>>  			if (v != VIRTIO_MSI_NO_VECTOR) {
>>  				int irq = pci_irq_vector(vp_dev->pci_dev, v);
>> @@ -294,6 +298,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>>  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
>>  	if (!vp_dev->vqs)
>>  		return -ENOMEM;
>> +	vp_dev->nvqs = nvqs;
>>
>>  	if (per_vq_vectors) {
>>  		/* Best option: one for change interrupt, one per vq. */
>> @@ -365,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>>  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
>>  	if (!vp_dev->vqs)
>>  		return -ENOMEM;
>> +	vp_dev->nvqs = nvqs;
>>
>>  	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
>>  			  dev_name(&vdev->dev), vp_dev);
>> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>> index 4b773bd7c58c..602021967aaa 100644
>> --- a/drivers/virtio/virtio_pci_common.h
>> +++ b/drivers/virtio/virtio_pci_common.h
>> @@ -60,6 +60,7 @@ struct virtio_pci_device {
>>
>>  	/* array of all queues for house-keeping */
>>  	struct virtio_pci_vq_info **vqs;
>> +	u32 nvqs;
>
> I don't much like it that we are adding more duplicated info here.
> In fact, we tried removing the vqs array in
> 5c34d002dcc7a6dd665a19d098b4f4cd5501ba1a - there was some bug in that
> patch and the author didn't have the time to debug
> so I reverted but I don't really think we need to add to that.
>

Hi Michael

As explained in the commit message, this patch mainly prepares for the
subsequent admin vq patches.

The admin vq is also established using the common vring mechanism, and
is added to vdev->vqs in __vring_new_virtqueue(). So vdev->vqs contains
all virtqueues, including rxq, txq, ctrlq and the admin vq.

The admin vq should be managed by the virtio_pci layer and should not
be created or deleted by an upper driver (net, blk).
When an upper driver is unloaded, it calls the del_vqs() interface,
which calls vp_del_vqs(), and vp_del_vqs() should not delete the admin
vq, but only the virtqueues created by the upper driver, such as rxq,
txq and ctrlq.

The vp_dev->vqs[] array only contains the virtqueues created by the
upper driver, such as rxq, txq and ctrlq. Traversing the vp_dev->vqs
array therefore deletes only those virtqueues and leaves the admin vq
alone; the vdev->vqs linked list cannot meet that need.

Does this explain it clearly? Or do you have an alternative approach
in mind?

>>
>>  	/* MSI-X support */
>>  	int msix_enabled;
>> --
>> 2.27.0
>
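[Editorial note] To make the layering argument above concrete, here is a small standalone sketch (plain C, not kernel code; every name in it is invented for illustration) of why walking a transport-private array of driver-created VQs skips an admin VQ that only exists on the device-wide list:

```c
#include <stdio.h>

/* Invented stand-in for a virtqueue; only what the example needs. */
struct sketch_vq {
	const char *name;
	int owned_by_transport;	/* 1 for the admin VQ */
};

#define MAX_VQS 8

int main(void)
{
	/* Device-wide view, analogous to vdev->vqs: driver VQs plus the admin VQ. */
	struct sketch_vq all[] = {
		{ "rx", 0 }, { "tx", 0 }, { "ctrl", 0 }, { "admin", 1 },
	};
	/* Transport-private array, analogous to vp_dev->vqs[]: driver VQs only. */
	struct sketch_vq *driver_vqs[MAX_VQS] = { &all[0], &all[1], &all[2] };
	unsigned int nvqs = 3, i;

	/* del_vqs() modelled on the patched vp_del_vqs(): walk only the array. */
	for (i = 0; i < nvqs; i++) {
		if (!driver_vqs[i])
			continue;
		printf("deleting %s\n", driver_vqs[i]->name);
	}

	/* The admin VQ was never visited, so it survives driver unload. */
	for (i = 0; i < sizeof(all) / sizeof(all[0]); i++)
		if (all[i].owned_by_transport)
			printf("%s still present\n", all[i].name);

	return 0;
}
```

The same reasoning in kernel terms: __vring_new_virtqueue() links every virtqueue, including a PCI-layer admin vq, into vdev->vqs, so a list walk there cannot distinguish ownership, while the vp_dev->vqs[] array is populated only by find_vqs() on behalf of the upper driver.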
On 2023-09-26 p.m.3:13, Feng Liu via Virtualization wrote:
> External email: Use caution opening links or attachments
>
>
> On 2023-09-21 a.m.9:46, Michael S. Tsirkin wrote:
>> External email: Use caution opening links or attachments
>>
>>
>> On Thu, Sep 21, 2023 at 03:40:30PM +0300, Yishai Hadas wrote:
>>> From: Feng Liu <feliu@nvidia.com>
>>>
>>> pci_irq_vector(vp_dev->pci_dev, v);
>>> @@ -294,6 +298,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>>>  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
>>>  	if (!vp_dev->vqs)
>>>  		return -ENOMEM;
>>> +	vp_dev->nvqs = nvqs;
>>>
>>>  	if (per_vq_vectors) {
>>>  		/* Best option: one for change interrupt, one per vq. */
>>> @@ -365,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>>>  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
>>>  	if (!vp_dev->vqs)
>>>  		return -ENOMEM;
>>> +	vp_dev->nvqs = nvqs;
>>>
>>>  	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
>>>  			  dev_name(&vdev->dev), vp_dev);
>>> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
>>> index 4b773bd7c58c..602021967aaa 100644
>>> --- a/drivers/virtio/virtio_pci_common.h
>>> +++ b/drivers/virtio/virtio_pci_common.h
>>> @@ -60,6 +60,7 @@ struct virtio_pci_device {
>>>
>>>  	/* array of all queues for house-keeping */
>>>  	struct virtio_pci_vq_info **vqs;
>>> +	u32 nvqs;
>>
>> I don't much like it that we are adding more duplicated info here.
>> In fact, we tried removing the vqs array in
>> 5c34d002dcc7a6dd665a19d098b4f4cd5501ba1a - there was some bug in that
>> patch and the author didn't have the time to debug
>> so I reverted but I don't really think we need to add to that.
>>
>
> Hi Michael
>
> As explained in the commit message, this patch mainly prepares for the
> subsequent admin vq patches.
>
> The admin vq is also established using the common vring mechanism, and
> is added to vdev->vqs in __vring_new_virtqueue(). So vdev->vqs contains
> all virtqueues, including rxq, txq, ctrlq and the admin vq.
>
> The admin vq should be managed by the virtio_pci layer and should not
> be created or deleted by an upper driver (net, blk).
> When an upper driver is unloaded, it calls the del_vqs() interface,
> which calls vp_del_vqs(), and vp_del_vqs() should not delete the admin
> vq, but only the virtqueues created by the upper driver, such as rxq,
> txq and ctrlq.
>
> The vp_dev->vqs[] array only contains the virtqueues created by the
> upper driver, such as rxq, txq and ctrlq. Traversing the vp_dev->vqs
> array therefore deletes only those virtqueues and leaves the admin vq
> alone; the vdev->vqs linked list cannot meet that need.
>
> Does this explain it clearly? Or do you have an alternative approach
> in mind?
>

Hi Michael,

Is the above explanation OK with you?

Thanks
Feng

>>>
>>>  	/* MSI-X support */
>>>  	int msix_enabled;
>>> --
>>> 2.27.0
>>
>
> _______________________________________________
> Virtualization mailing list
> Virtualization@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/virtualization
On Wed, Sep 27, 2023 at 02:09:43PM -0400, Feng Liu wrote:
>
>
> On 2023-09-26 p.m.3:13, Feng Liu via Virtualization wrote:
> > External email: Use caution opening links or attachments
> >
> >
> > On 2023-09-21 a.m.9:46, Michael S. Tsirkin wrote:
> > > External email: Use caution opening links or attachments
> > >
> > >
> > > On Thu, Sep 21, 2023 at 03:40:30PM +0300, Yishai Hadas wrote:
> > > > From: Feng Liu <feliu@nvidia.com>
> > > >
> > > > pci_irq_vector(vp_dev->pci_dev, v);
> > > > @@ -294,6 +298,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
> > > >  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
> > > >  	if (!vp_dev->vqs)
> > > >  		return -ENOMEM;
> > > > +	vp_dev->nvqs = nvqs;
> > > >
> > > >  	if (per_vq_vectors) {
> > > >  		/* Best option: one for change interrupt, one per vq. */
> > > > @@ -365,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
> > > >  	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
> > > >  	if (!vp_dev->vqs)
> > > >  		return -ENOMEM;
> > > > +	vp_dev->nvqs = nvqs;
> > > >
> > > >  	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
> > > >  			  dev_name(&vdev->dev), vp_dev);
> > > > diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> > > > index 4b773bd7c58c..602021967aaa 100644
> > > > --- a/drivers/virtio/virtio_pci_common.h
> > > > +++ b/drivers/virtio/virtio_pci_common.h
> > > > @@ -60,6 +60,7 @@ struct virtio_pci_device {
> > > >
> > > >  	/* array of all queues for house-keeping */
> > > >  	struct virtio_pci_vq_info **vqs;
> > > > +	u32 nvqs;
> > >
> > > I don't much like it that we are adding more duplicated info here.
> > > In fact, we tried removing the vqs array in
> > > 5c34d002dcc7a6dd665a19d098b4f4cd5501ba1a - there was some bug in that
> > > patch and the author didn't have the time to debug
> > > so I reverted but I don't really think we need to add to that.
> > >
> >
> > Hi Michael
> >
> > As explained in the commit message, this patch mainly prepares for the
> > subsequent admin vq patches.
> >
> > The admin vq is also established using the common vring mechanism, and
> > is added to vdev->vqs in __vring_new_virtqueue(). So vdev->vqs contains
> > all virtqueues, including rxq, txq, ctrlq and the admin vq.
> >
> > The admin vq should be managed by the virtio_pci layer and should not
> > be created or deleted by an upper driver (net, blk).
> > When an upper driver is unloaded, it calls the del_vqs() interface,
> > which calls vp_del_vqs(), and vp_del_vqs() should not delete the admin
> > vq, but only the virtqueues created by the upper driver, such as rxq,
> > txq and ctrlq.
> >
> > The vp_dev->vqs[] array only contains the virtqueues created by the
> > upper driver, such as rxq, txq and ctrlq. Traversing the vp_dev->vqs
> > array therefore deletes only those virtqueues and leaves the admin vq
> > alone; the vdev->vqs linked list cannot meet that need.
> >
> > Does this explain it clearly? Or do you have an alternative approach
> > in mind?
> >
>
> Hi Michael,
>
> Is the above explanation OK with you?
>
> Thanks
> Feng

First, the patch only addresses PCI. Second, yes, driver unload calls
del_vqs(), but doesn't it also reset the device? If this happens while
vfio tries to send commands to it, then you have other problems. And,
for the baroque need of an admin vq, which most devices don't have, you
are duplicating logic and wasting memory for everyone.

What is a sane solution? virtio core was never designed to allow two
drivers to access the same device. So don't try; add the device-access
logic in virtio core. I feel the problem won't even exist if, instead of
just exposing the device pointer, you expose a sane interface.

> > > >
> > > >  	/* MSI-X support */
> > > >  	int msix_enabled;
> > > > --
> > > > 2.27.0
> > >
> >
> > _______________________________________________
> > Virtualization mailing list
> > Virtualization@lists.linux-foundation.org
> > https://lists.linuxfoundation.org/mailman/listinfo/virtualization
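[Editorial note] The following is a purely hypothetical sketch of the kind of "sane interface" being asked for above. None of these names exist in the kernel at the time of this thread; they only illustrate the shape of the proposal, in which virtio core (or virtio_pci) owns and serializes the admin virtqueue, and a consumer such as a vfio variant driver submits commands without ever touching vdev->vqs or vp_dev->vqs[].

```c
/* Hypothetical interface sketch only -- not an existing kernel API. */
#include <stddef.h>
#include <stdint.h>

struct virtio_device;			/* opaque to the caller */

/* An admin command as the caller would describe it; the transport would
 * map this onto its admin virtqueue internally. */
struct virtio_admin_cmd_sketch {
	uint16_t opcode;
	const void *request;
	size_t request_len;
	void *result;
	size_t result_len;
};

/* Single entry point exported by virtio core in this sketch: submission,
 * serialization and completion of admin commands stay inside the core. */
int virtio_admin_cmd_exec_sketch(struct virtio_device *vdev,
				 const struct virtio_admin_cmd_sketch *cmd);
```

With an entry point of this shape, the question of which layer may delete the admin vq never reaches the upper drivers at all, which is the design direction the reply above argues for.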
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index c2524a7207cf..7a3e6edc4dd6 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -232,12 +232,16 @@ static void vp_del_vq(struct virtqueue *vq)
 void vp_del_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtqueue *vq, *n;
+	struct virtqueue *vq;
 	int i;
 
-	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+	for (i = 0; i < vp_dev->nvqs; i++) {
+		if (!vp_dev->vqs[i])
+			continue;
+
+		vq = vp_dev->vqs[i]->vq;
 		if (vp_dev->per_vq_vectors) {
-			int v = vp_dev->vqs[vq->index]->msix_vector;
+			int v = vp_dev->vqs[i]->msix_vector;
 
 			if (v != VIRTIO_MSI_NO_VECTOR) {
 				int irq = pci_irq_vector(vp_dev->pci_dev, v);
@@ -294,6 +298,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
 	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
 	if (!vp_dev->vqs)
 		return -ENOMEM;
+	vp_dev->nvqs = nvqs;
 
 	if (per_vq_vectors) {
 		/* Best option: one for change interrupt, one per vq. */
@@ -365,6 +370,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
 	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
 	if (!vp_dev->vqs)
 		return -ENOMEM;
+	vp_dev->nvqs = nvqs;
 
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			  dev_name(&vdev->dev), vp_dev);
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 4b773bd7c58c..602021967aaa 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -60,6 +60,7 @@ struct virtio_pci_device {
 
 	/* array of all queues for house-keeping */
 	struct virtio_pci_vq_info **vqs;
+	u32 nvqs;
 
 	/* MSI-X support */
 	int msix_enabled;