
[CI,DO_NOT_MERGE,3/3] drm/i915/selftests: add forcewake_with_spinners tests

Message ID 20230517194040.3857137-3-andrzej.hajda@intel.com (mailing list archive)
State New, archived
Series [CI,DO_NOT_MERGE,1/3] drm/i915/mtl: do not enable render power-gating on MTL

Commit Message

Andrzej Hajda May 17, 2023, 7:40 p.m. UTC
The test checks whether running spinners interfere with forcewake: with every
engine kept busy by a spinner, each forcewake domain must still acknowledge a
kernel forcewake request.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
---
 drivers/gpu/drm/i915/selftests/intel_uncore.c | 85 +++++++++++++++++++
 1 file changed, 85 insertions(+)
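
In short, the new subtest keeps every capable engine busy with an igt_spinner,
grabs FORCEWAKE_ALL, and verifies that each forcewake domain still reports the
kernel's wake request in its ack register. A condensed sketch of that core
loop (same igt_spinner and uncore helpers as the full patch below; error
handling and logging dropped for brevity):

	/* Park a spinning batch on every engine that can store a dword. */
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;

		if (!intel_engine_can_store_dword(engine))
			continue;

		ce = intel_context_create(engine);
		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce);
		i915_request_add(rq);
	}

	/* With the engines spinning, every domain must ack the kernel wake. */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for_each_fw_domain(domain, gt->uncore, tmp)
		if (!(readl(domain->reg_ack) & FORCEWAKE_KERNEL))
			err = -EINVAL;
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

The _25s variant simply repeats this check in a loop for 25 seconds.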

Patch

diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e4281508d5808b..0ce8a5c5ee0064 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -22,7 +22,10 @@ 
  *
  */
 
+#include <i915_gpu_error.h>
+#include <gt/intel_gpu_commands.h>
 #include "../i915_selftest.h"
+#include <selftests/igt_spinner.h>
 
 static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
 				unsigned int num_ranges,
@@ -342,12 +345,94 @@  static int live_fw_table(void *arg)
 				    GRAPHICS_VER(gt->i915) >= 9);
 }
 
+static int live_forcewake_with_spinners(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_uncore_forcewake_domain *domain;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	struct igt_spinner spin;
+	unsigned int tmp;
+	int err;
+
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+	err = igt_spinner_init(&spin, gt);
+	if (err)
+		goto err_rpm;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		struct i915_request *rq;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		pr_info("%s: Spinning %s\n", __func__, engine->name);
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto err_spin;
+		}
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_spin;
+		}
+		i915_request_add(rq);
+	}
+
+	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+	for_each_fw_domain(domain, gt->uncore, tmp) {
+		if (readl(domain->reg_ack) & FORCEWAKE_KERNEL)
+			continue;
+		pr_err("%s: not acked\n", intel_uncore_forcewake_domain_to_str(domain->id));
+		err = -EINVAL;
+	}
+	if (err) {
+#if defined(CONFIG_DRM_I915_DEBUG_WAKEREF) // Ugly test of presence of intel_klog_error_capture
+		intel_klog_error_capture(gt, (intel_engine_mask_t) ~0U);
+#else
+		pr_err("Time to catch GuC logs.\n");
+		msleep(4000);
+#endif
+	}
+	msleep(3);
+	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+
+err_spin:
+	igt_spinner_fini(&spin);
+err_rpm:
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+	return err;
+}
+
+static int live_forcewake_with_spinners_25s(void *arg)
+{
+	ktime_t t = ktime_get();
+	int err = 0;
+
+	while (ktime_ms_delta(ktime_get(), t) < 25000) {
+		err = live_forcewake_with_spinners(arg);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
 int intel_uncore_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_fw_table),
 		SUBTEST(live_forcewake_ops),
 		SUBTEST(live_forcewake_domains),
+		SUBTEST(live_forcewake_with_spinners),
+		SUBTEST(live_forcewake_with_spinners_25s),
 	};
 
 	return intel_gt_live_subtests(tests, to_gt(i915));