@@ -39,6 +39,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_hw_engine_group.h"
+#include "xe_hw_fence.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
@@ -902,6 +903,7 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err;
+ xe_hw_fence_irq_init(&xe->user_fence_irq);
for_each_gt(gt, xe, id) {
last_gt = id;
@@ -944,6 +946,7 @@ int xe_device_probe(struct xe_device *xe)
xe_oa_fini(xe);
err_fini_gt:
+ xe_hw_fence_irq_finish(&xe->user_fence_irq);
for_each_gt(gt, xe, id) {
if (id < last_gt)
xe_gt_remove(gt);
@@ -979,6 +982,7 @@ void xe_device_remove(struct xe_device *xe)
xe_heci_gsc_fini(xe);
+ xe_hw_fence_irq_finish(&xe->user_fence_irq);
for_each_gt(gt, xe, id)
xe_gt_remove(gt);
}
@@ -507,6 +507,9 @@ struct xe_device {
int mode;
} wedged;
+ /** @user_fence_irq: User fence IRQ handler */
+ struct xe_hw_fence_irq user_fence_irq;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
@@ -822,8 +822,10 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
if (hwe->irq_handler)
hwe->irq_handler(hwe, intr_vec);
- if (intr_vec & GT_RENDER_USER_INTERRUPT)
+ if (intr_vec & GT_RENDER_USER_INTERRUPT) {
+ xe_hw_fence_irq_run(&gt_to_xe(hwe->gt)->user_fence_irq);
xe_hw_fence_irq_run(hwe->fence_irq);
+ }
}
/**
Imported user fences will not be tied to a specific queue or hardware
engine class. Therefore, a device IRQ handler is needed to signal the
associated exported DMA fences.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_device.c       | 4 ++++
 drivers/gpu/drm/xe/xe_device_types.h | 3 +++
 drivers/gpu/drm/xe/xe_hw_engine.c    | 4 +++-
 3 files changed, 10 insertions(+), 1 deletion(-)
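
To make the intent concrete, below is a minimal, self-contained user-space
sketch of the pattern this patch introduces: a single device-level fence-IRQ
container that any engine's user interrupt can run, signaling pending fences
that are not tied to a particular queue or hardware engine class. All names
here (fake_fence, fake_fence_irq, fake_fence_irq_*) are hypothetical
stand-ins used only to illustrate the shape of xe_hw_fence_irq_init() /
xe_hw_fence_irq_run() usage; they are not the xe driver API, and the real
driver signals struct dma_fence objects under a spinlock rather than this
toy list.

/*
 * Toy model of a device-level user-fence IRQ (compare xe->user_fence_irq).
 * Hypothetical names; not the xe driver API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_fence {
	bool signaled;
	struct fake_fence *next;
};

struct fake_fence_irq {
	pthread_mutex_t lock;		/* models the IRQ's spinlock */
	struct fake_fence *pending;	/* fences waiting on this device-level IRQ */
};

/* Analogue of xe_hw_fence_irq_init(): set up the device-level container. */
static void fake_fence_irq_init(struct fake_fence_irq *irq)
{
	pthread_mutex_init(&irq->lock, NULL);
	irq->pending = NULL;
}

/* Queue a fence on the device-level IRQ, independent of any engine class. */
static void fake_fence_irq_arm(struct fake_fence_irq *irq, struct fake_fence *f)
{
	pthread_mutex_lock(&irq->lock);
	f->next = irq->pending;
	irq->pending = f;
	pthread_mutex_unlock(&irq->lock);
}

/* Analogue of xe_hw_fence_irq_run(): signal every pending fence. */
static void fake_fence_irq_run(struct fake_fence_irq *irq)
{
	pthread_mutex_lock(&irq->lock);
	for (struct fake_fence *f = irq->pending; f; f = f->next)
		f->signaled = true;
	irq->pending = NULL;
	pthread_mutex_unlock(&irq->lock);
}

int main(void)
{
	struct fake_fence_irq dev_irq;			/* like xe->user_fence_irq */
	struct fake_fence fence = { .signaled = false };

	fake_fence_irq_init(&dev_irq);
	fake_fence_irq_arm(&dev_irq, &fence);

	/* Any engine's user interrupt can run the device-level handler. */
	fake_fence_irq_run(&dev_irq);

	printf("fence signaled: %s\n", fence.signaled ? "yes" : "no");
	return 0;
}

In the patch itself the same idea appears as xe_device_probe() initializing
xe->user_fence_irq and xe_hw_engine_handle_irq() running it alongside the
per-engine fence_irq whenever GT_RENDER_USER_INTERRUPT fires.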