@@ -85,19 +85,14 @@ vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
 /*
  * INTx
  */
-static void vfio_send_intx_eventfd(void *opaque, void *unused)
+static void vfio_send_intx_eventfd(void *opaque, void *data)
 {
         struct vfio_pci_core_device *vdev = opaque;
 
         if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
-                struct vfio_pci_irq_ctx *ctx;
-                struct eventfd_ctx *trigger;
+                struct vfio_pci_irq_ctx *ctx = data;
+                struct eventfd_ctx *trigger = READ_ONCE(ctx->trigger);
 
-                ctx = vfio_irq_ctx_get(vdev, 0);
-                if (WARN_ON_ONCE(!ctx))
-                        return;
-
-                trigger = READ_ONCE(ctx->trigger);
                 if (likely(trigger))
                         eventfd_signal(trigger);
         }
@@ -167,11 +162,11 @@ bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
  * a signal is necessary, which can then be handled via a work queue
  * or directly depending on the caller.
  */
-static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
+static int vfio_pci_intx_unmask_handler(void *opaque, void *data)
 {
         struct vfio_pci_core_device *vdev = opaque;
         struct pci_dev *pdev = vdev->pdev;
-        struct vfio_pci_irq_ctx *ctx;
+        struct vfio_pci_irq_ctx *ctx = data;
         unsigned long flags;
         int ret = 0;
 
@@ -187,10 +182,6 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
                 goto out_unlock;
         }
 
-        ctx = vfio_irq_ctx_get(vdev, 0);
-        if (WARN_ON_ONCE(!ctx))
-                goto out_unlock;
-
         if (ctx->masked && !vdev->virq_disabled) {
                 /*
                  * A pending interrupt here would immediately trigger,
@@ -214,10 +205,12 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
 
 static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
 {
+        struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
+
         lockdep_assert_held(&vdev->igate);
 
-        if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
-                vfio_send_intx_eventfd(vdev, NULL);
+        if (vfio_pci_intx_unmask_handler(vdev, ctx) > 0)
+                vfio_send_intx_eventfd(vdev, ctx);
 }
 
 void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
@@ -249,7 +242,7 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
         spin_unlock_irqrestore(&vdev->irqlock, flags);
 
         if (ret == IRQ_HANDLED)
-                vfio_send_intx_eventfd(vdev, NULL);
+                vfio_send_intx_eventfd(vdev, ctx);
 
         return ret;
 }
@@ -604,7 +597,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
                 if (fd >= 0)
                         return vfio_virqfd_enable((void *) vdev,
                                                   vfio_pci_intx_unmask_handler,
-                                                  vfio_send_intx_eventfd, NULL,
+                                                  vfio_send_intx_eventfd, ctx,
                                                   &ctx->unmask, fd);
 
                 vfio_virqfd_disable(&ctx->unmask);
@@ -671,11 +664,11 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
                 return -EINVAL;
 
         if (flags & VFIO_IRQ_SET_DATA_NONE) {
-                vfio_send_intx_eventfd(vdev, NULL);
+                vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
         } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                 uint8_t trigger = *(uint8_t *)data;
 
                 if (trigger)
-                        vfio_send_intx_eventfd(vdev, NULL);
+                        vfio_send_intx_eventfd(vdev, vfio_irq_ctx_get(vdev, 0));
         }
         return 0;
 }
Further avoid lookup of the context object by passing it through the
irqfd data field.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
---
 drivers/vfio/pci/vfio_pci_intrs.c | 33 ++++++++++++-------------------
 1 file changed, 13 insertions(+), 20 deletions(-)
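
As a minimal illustration of the idea (a userspace sketch, not the kernel
code; the demo_* names and struct layouts below are invented for this
example, only eventfd(2)/read(2)/write(2) are real APIs), registering the
handler together with its per-interrupt context pointer lets the firing
path hand the context straight to the handler, with no index-based lookup:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

struct demo_irq_ctx {
        int trigger;                    /* eventfd to signal, akin to ctx->trigger */
};

struct demo_virqfd {
        void (*thread)(void *opaque, void *data);
        void *opaque;                   /* device pointer in the real driver */
        void *data;                     /* per-interrupt context, stored once at setup */
};

/* Handler receives the context via @data instead of looking it up. */
static void demo_send_eventfd(void *opaque, void *data)
{
        struct demo_irq_ctx *ctx = data;
        uint64_t one = 1;

        (void)opaque;                   /* device not needed for the demo */
        if (write(ctx->trigger, &one, sizeof(one)) != sizeof(one))
                perror("eventfd write");
}

int main(void)
{
        struct demo_irq_ctx ctx = { .trigger = eventfd(0, 0) };
        struct demo_virqfd virqfd = {
                .thread = demo_send_eventfd,
                .opaque = NULL,
                .data = &ctx,           /* context travels with the registration */
        };
        uint64_t count;

        if (ctx.trigger < 0) {
                perror("eventfd");
                return 1;
        }

        /* Simulate the interrupt path invoking the registered handler. */
        virqfd.thread(virqfd.opaque, virqfd.data);

        if (read(ctx.trigger, &count, sizeof(count)) == sizeof(count))
                printf("eventfd signaled, count=%llu\n",
                       (unsigned long long)count);

        close(ctx.trigger);
        return 0;
}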