@@ -179,4 +179,5 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
+
#endif /* __INTEL_GT_H__ */
@@ -166,6 +166,31 @@ void intel_gt_fini_tlb(struct intel_gt *gt)
mutex_destroy(&gt->tlb.invalidate_lock);
}
+void intel_gt_tlb_suspend_all(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ int i;
+
+ if (!HAS_GUC_TLB_INVALIDATION(i915))
+ return;
+ for_each_gt(gt, i915, i)
+ wake_up_all_tlb_invalidate(&gt->uc.guc);
+}
+
+void intel_gt_tlb_resume_all(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ int i;
+
+ if (!HAS_GUC_TLB_INVALIDATION(i915))
+ return;
+ for_each_gt(gt, i915, i) {
+ /* Perform tlb invalidation on both GT and GuC, in that order. */
+ intel_guc_invalidate_tlb_full(&gt->uc.guc);
+ intel_guc_invalidate_tlb(&gt->uc.guc);
+ }
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_tlb.c"
#endif
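For context, these two helpers bracket system suspend/resume: intel_gt_tlb_suspend_all() wakes any caller still blocked on a GuC TLB invalidation response (it runs after interrupts are disabled, so no further G2H responses can arrive), and intel_gt_tlb_resume_all() re-invalidates the GT and GuC TLBs on every GT. A minimal sketch of the intended call ordering, condensed from the i915_drm_suspend()/i915_drm_resume() hunks further down; the example_* wrappers are hypothetical, everything they call is from this series:

	static void example_suspend(struct drm_i915_private *i915)
	{
		/* Interrupts off first: no more G2H invalidation responses. */
		intel_runtime_pm_disable_interrupts(i915);
		/* Release anyone still waiting on an invalidation. */
		intel_gt_tlb_suspend_all(i915);
	}

	static void example_resume(struct drm_i915_private *i915)
	{
		/* Flush the GT and GuC TLBs on every GT after resume. */
		intel_gt_tlb_resume_all(i915);
	}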
@@ -26,4 +26,7 @@ static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
return intel_gt_tlb_seqno(gt) | 1;
}
+void intel_gt_tlb_suspend_all(struct drm_i915_private *i915);
+void intel_gt_tlb_resume_all(struct drm_i915_private *i915);
+
#endif /* INTEL_TLB_H */
@@ -536,4 +536,5 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
+void wake_up_all_tlb_invalidate(struct intel_guc *guc);
#endif
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_irq.h"
#include "i915_trace.h"
/**
@@ -1796,13 +1797,23 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
intel_context_put(parent);
}
-void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+void wake_up_all_tlb_invalidate(struct intel_guc *guc)
{
struct intel_guc_tlb_wait *wait;
+ unsigned long i;
+
+ if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915))
+ return;
+ xa_for_each(&guc->tlb_lookup, i, wait) {
+ wake_up(&wait->wq);
+ }
+}
+
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
+{
struct intel_context *ce;
unsigned long index;
unsigned long flags;
- unsigned long i;
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
@@ -1833,9 +1844,7 @@ void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stall
* The full GT reset will have cleared the TLB caches and flushed the
* G2H message queue; we can release all the blocked waiters.
*/
- xa_for_each(&guc->tlb_lookup, i, wait) {
- wake_up(&wait->wq);
- }
+ wake_up_all_tlb_invalidate(guc);
}
static void guc_cancel_context_requests(struct intel_context *ce)
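For context, every guc_send_invalidate_tlb() caller registers a per-seqno wait entry in guc->tlb_lookup and sleeps on its wait queue until the matching G2H response arrives; wake_up_all_tlb_invalidate() walks that xarray and unblocks every entry, which lets the reset path above and the cancel-requests path below share one helper. A minimal sketch of the pattern being drained, with a simplified wait-entry layout (an assumption, not the exact struct intel_guc_tlb_wait definition):

	#include <linux/wait.h>
	#include <linux/xarray.h>

	struct example_tlb_wait {
		struct wait_queue_head wq;	/* each blocked caller sleeps here */
	};

	static void example_release_all(struct xarray *tlb_lookup)
	{
		struct example_tlb_wait *wait;
		unsigned long seqno;

		/* Walk every outstanding invalidation and unblock its waiter. */
		xa_for_each(tlb_lookup, seqno, wait)
			wake_up(&wait->wq);
	}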
@@ -1931,6 +1940,12 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
+
+ /*
+ * Wedged GT won't respond to any TLB invalidation request. Simply
+ * release all the blocked waiters.
+ */
+ wake_up_all_tlb_invalidate(guc);
}
void intel_guc_submission_reset_finish(struct intel_guc *guc)
@@ -4746,6 +4761,14 @@ static long must_wait_woken(struct wait_queue_entry *wq_entry, long timeout)
return timeout;
}
+static bool intel_gt_is_enabled(const struct intel_gt *gt)
+{
+ /* Check if GT is wedged or suspended */
+ if (intel_gt_is_wedged(gt) || !intel_irqs_enabled(gt->i915))
+ return false;
+ return true;
+}
+
static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
{
struct intel_guc_tlb_wait _wq, *wq = &_wq;
@@ -4763,7 +4786,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
};
u32 size = ARRAY_SIZE(action);
- if (!intel_guc_ct_enabled(&guc->ct))
+ if (!intel_gt_is_enabled(gt) ||
+ !intel_guc_ct_enabled(&guc->ct))
return -EINVAL;
init_waitqueue_head(&_wq.wq);
@@ -4806,7 +4830,8 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 type)
* requests that can be queued in CT buffer.
*/
#define OUTSTANDING_GUC_TIMEOUT_PERIOD (HZ * 2)
- if (!must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
+ if (intel_gt_is_enabled(gt) &&
+ !must_wait_woken(&wait, OUTSTANDING_GUC_TIMEOUT_PERIOD)) {
gt_err(gt,
"TLB invalidation response timed out for seqno %u\n", seqno);
err = -ETIME;
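The intel_gt_is_enabled() check is applied twice on purpose: a wedged or suspended GT will never deliver the G2H response, so the request is refused before it is sent, and a GT that stops being "enabled" while waiting is not reported as -ETIME, since the reset/suspend paths wake the waiter through wake_up_all_tlb_invalidate() instead. A condensed sketch of the resulting flow, reusing identifiers from the hunks above; example_send_h2g() is a hypothetical stand-in for the CT send and waiter setup:

	static int example_send_invalidate(struct intel_gt *gt, struct intel_guc *guc,
					   struct wait_queue_entry *wait)
	{
		int err;

		/* A wedged/suspended GT will never answer: refuse to send. */
		if (!intel_gt_is_enabled(gt) || !intel_guc_ct_enabled(&guc->ct))
			return -EINVAL;

		err = example_send_h2g(guc);	/* hypothetical stand-in */
		if (err)
			return err;

		/* Only a GT that can still respond is allowed to time out. */
		if (intel_gt_is_enabled(gt) &&
		    !must_wait_woken(wait, OUTSTANDING_GUC_TIMEOUT_PERIOD))
			err = -ETIME;

		return err;
	}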
@@ -72,6 +72,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "gt/intel_tlb.h"
#include "pxp/intel_pxp.h"
#include "pxp/intel_pxp_debugfs.h"
@@ -1093,6 +1094,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_dp_mst_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
+
+ intel_gt_tlb_suspend_all(dev_priv);
+
intel_hpd_cancel_work(dev_priv);
intel_suspend_encoders(dev_priv);
@@ -1264,6 +1268,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_gvt_resume(dev_priv);
+ intel_gt_tlb_resume_all(dev_priv);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;