diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -185,6 +185,19 @@ static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 	return vfio_pci_core_sriov_configure(vdev, nr_virtfn);
 }
 
+static void vfio_pci_vf_reset_notification(struct pci_dev *pf, struct pci_dev *vf)
+{
+	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pf->dev);
+	int i = pci_iov_vf_id(vf);	/* negative errno if vf is not a VF */
+
+	mutex_lock(&vdev->igate);
+
+	if (i >= 0 && vdev->vf_reset_trigger && vdev->vf_reset_trigger[i])
+		eventfd_signal(vdev->vf_reset_trigger[i], 1);
+
+	mutex_unlock(&vdev->igate);
+}
+
 static const struct pci_device_id vfio_pci_table[] = {
 	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_ANY_ID, PCI_ANY_ID) }, /* match all by default */
 	{}
@@ -198,6 +211,7 @@ static struct pci_driver vfio_pci_driver = {
 	.probe			= vfio_pci_probe,
 	.remove			= vfio_pci_remove,
 	.sriov_configure	= vfio_pci_sriov_configure,
+	.sriov_vf_reset_notification = vfio_pci_vf_reset_notification,
 	.err_handler		= &vfio_pci_core_err_handlers,
 	.driver_managed_dma	= true,
 };
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -686,6 +686,7 @@ void vfio_pci_core_close_device(struct vfio_device *core_vdev)
 {
 	struct vfio_pci_core_device *vdev =
 		container_of(core_vdev, struct vfio_pci_core_device, vdev);
+	int i;
 
 	if (vdev->sriov_pf_core_dev) {
 		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
@@ -707,6 +708,17 @@ void vfio_pci_core_close_device(struct vfio_device *core_vdev)
 		eventfd_ctx_put(vdev->req_trigger);
 		vdev->req_trigger = NULL;
 	}
+
+	if (vdev->pdev->is_physfn && vdev->vf_reset_trigger) {
+		for (i = 0; i < pci_sriov_get_totalvfs(vdev->pdev); i++) {
+			if (vdev->vf_reset_trigger[i]) {
+				eventfd_ctx_put(vdev->vf_reset_trigger[i]);
+				vdev->vf_reset_trigger[i] = NULL;
+			}
+		}
+		kfree(vdev->vf_reset_trigger);
+		vdev->vf_reset_trigger = NULL;
+	}
 	mutex_unlock(&vdev->igate);
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_close_device);
@@ -718,6 +730,13 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
 	eeh_dev_open(vdev->pdev);
 #endif
 
+	if (vdev->pdev->is_physfn) {
+		vdev->vf_reset_trigger = kcalloc(pci_sriov_get_totalvfs(vdev->pdev),
+						 sizeof(*vdev->vf_reset_trigger), GFP_KERNEL);
+		if (!vdev->vf_reset_trigger)
+			pci_info(vdev->pdev, "%s: unable to allocate VF reset triggers\n", __func__);
+	}
+
 	if (vdev->sriov_pf_core_dev) {
 		mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
 		vdev->sriov_pf_core_dev->vf_token->users++;
@@ -764,6 +783,9 @@ static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_typ
 		return 1;
 	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
 		return 1;
+	} else if (irq_type == VFIO_PCI_VF_RESET_IRQ_INDEX) {
+		if (vdev->pdev->is_physfn)
+			return pci_sriov_get_totalvfs(vdev->pdev);
 	}
 
 	return 0;
@@ -1141,6 +1163,10 @@ static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
 		if (pci_is_pcie(vdev->pdev))
 			break;
 		fallthrough;
+	case VFIO_PCI_VF_RESET_IRQ_INDEX:
+		if (vdev->pdev->is_physfn)
+			break;
+		fallthrough;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -776,6 +776,28 @@ static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
 					       count, flags, data);
 }
 
+static int vfio_pci_vf_reset_trigger(struct vfio_pci_core_device *vdev,
+				     unsigned index, unsigned start,
+				     unsigned count, uint32_t flags, void *data)
+{
+	int32_t *fds = data;
+	unsigned int i;
+	int ret;
+
+	if (!vdev->vf_reset_trigger || index != VFIO_PCI_VF_RESET_IRQ_INDEX ||
+	    start != 0 || count > pci_sriov_get_totalvfs(vdev->pdev))
+		return -EINVAL;
+
+	/* One trigger per VF: subindex i corresponds to VF i on this PF. */
+	for (i = 0; i < count; i++) {
+		ret = vfio_pci_set_ctx_trigger_single(&vdev->vf_reset_trigger[i],
+						      1, flags, fds ? &fds[i] : NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
 			    unsigned index, unsigned start, unsigned count,
 			    void *data)
@@ -825,6 +847,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
 			break;
 		}
 		break;
+	case VFIO_PCI_VF_RESET_IRQ_INDEX:
+		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+		case VFIO_IRQ_SET_ACTION_TRIGGER:
+			if (vdev->pdev->is_physfn)
+				func = vfio_pci_vf_reset_trigger;
+			break;
+		}
+		break;
 	}
 
 	if (!func)
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -85,6 +85,7 @@ struct vfio_pci_core_device {
 	int			ioeventfds_nr;
 	struct eventfd_ctx	*err_trigger;
 	struct eventfd_ctx	*req_trigger;
+	struct eventfd_ctx	**vf_reset_trigger;
 	struct eventfd_ctx	*pm_wake_eventfd_ctx;
 	struct list_head	dummy_resources_list;
 	struct mutex		ioeventfds_lock;
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -643,6 +643,7 @@ enum {
 	VFIO_PCI_MSIX_IRQ_INDEX,
 	VFIO_PCI_ERR_IRQ_INDEX,
 	VFIO_PCI_REQ_IRQ_INDEX,
+	VFIO_PCI_VF_RESET_IRQ_INDEX,
 	VFIO_PCI_NUM_IRQS
 };
 
The VF doesn't have the ability to reset itself completely, which can
leave the hardware in an unstable state. So notify the PF driver when a
VF has been reset, letting the PF reset the VF completely and take the
VF out of scheduling.

How to implement this?
- Add the reset notification callback to struct pci_driver.
- Implement the callback in the vfio-pci driver.
- Add a VF RESET IRQ index so a user mode driver can learn that a VF
  has been reset.

Signed-off-by: Emily Deng <Emily.Deng@amd.com>
---
 drivers/vfio/pci/vfio_pci.c       | 14 ++++++++++++++
 drivers/vfio/pci/vfio_pci_core.c  | 26 ++++++++++++++++++++++++++++++
 drivers/vfio/pci/vfio_pci_intrs.c | 30 ++++++++++++++++++++++++++++++
 include/linux/vfio_pci_core.h     |  1 +
 include/uapi/linux/vfio.h         |  1 +
 5 files changed, 72 insertions(+)
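
Note for reviewers (not part of the commit message): the new index is
consumed through the existing VFIO_DEVICE_GET_IRQ_INFO /
VFIO_DEVICE_SET_IRQS uAPI. A minimal userspace sketch follows; it
assumes a VFIO device fd for the PF is already open (device_fd and
watch_vf_resets() are hypothetical names) and that <linux/vfio.h>
comes from a tree with this series applied, since
VFIO_PCI_VF_RESET_IRQ_INDEX doesn't exist otherwise.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <poll.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* device_fd: an already-open VFIO device fd for the PF (hypothetical). */
static int watch_vf_resets(int device_fd)
{
	struct vfio_irq_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_VF_RESET_IRQ_INDEX,
	};
	struct vfio_irq_set *set;
	struct pollfd *pfds;
	int32_t *fds;
	uint32_t i;

	/* count comes back as totalvfs on a PF, 0 otherwise */
	if (ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info) || !info.count)
		return -1;

	set = malloc(sizeof(*set) + info.count * sizeof(int32_t));
	pfds = calloc(info.count, sizeof(*pfds));
	if (!set || !pfds)
		return -1;

	set->argsz = sizeof(*set) + info.count * sizeof(int32_t);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_VF_RESET_IRQ_INDEX;
	set->start = 0;
	set->count = info.count;

	/* one eventfd per VF: subindex i corresponds to VF i */
	fds = (int32_t *)set->data;
	for (i = 0; i < info.count; i++) {
		fds[i] = eventfd(0, 0);
		pfds[i].fd = fds[i];
		pfds[i].events = POLLIN;
	}

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set))
		return -1;

	for (;;) {
		uint64_t val;

		if (poll(pfds, info.count, -1) < 0)
			return -1;
		for (i = 0; i < info.count; i++) {
			if (pfds[i].revents & POLLIN) {
				read(pfds[i].fd, &val, sizeof(val));
				printf("VF %u was reset\n", i);
			}
		}
	}
}

Registering one eventfd per VF means the reader learns which VF was
reset directly from which fd fired, without a follow-up query.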
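The pci_driver hook itself ("Add the reset callback function in
pci_driver") is a companion change that is not part of this diff, so
the shape below is an assumption: a sketch of how the PCI core might
invoke the new member from a VF reset path. pci_notify_vf_reset() is a
hypothetical name; only the sriov_vf_reset_notification member name
comes from the vfio_pci.c hunk above.

#include <linux/pci.h>

/* Hypothetical call site; the real one belongs to the companion patch. */
static void pci_notify_vf_reset(struct pci_dev *vf)
{
	struct pci_dev *pf;
	struct device_driver *drv;

	if (!vf->is_virtfn)
		return;

	pf = pci_physfn(vf);
	drv = pf->dev.driver;	/* the PF may be unbound */
	if (drv && to_pci_driver(drv)->sriov_vf_reset_notification)
		to_pci_driver(drv)->sriov_vf_reset_notification(pf, vf);
}

Routing the notification to the PF's driver (rather than the VF's own
user) matches the goal of the series: the PF owner is the one able to
reset the VF completely and take it out of scheduling.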