diff mbox

[RFC,i-g-t] igt/gem_workarounds: Test all types of workarounds

Message ID 1507582731-20177-1-git-send-email-oscar.mateo@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

oscar.mateo@intel.com Oct. 9, 2017, 8:58 p.m. UTC
Apart from context based workarounds, we can now also test for global
MMIO and whitelisting ones.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 tests/gem_workarounds.c | 194 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 156 insertions(+), 38 deletions(-)

Comments

Chris Wilson Oct. 9, 2017, 9:20 p.m. UTC | #1
Quoting Oscar Mateo (2017-10-09 21:58:51)
> Apart from context based workarounds, we can now also test for global
> MMIO and whitelisting ones.

One thing to note here is that using the kernel list still has the
reliance on it being trustworthy and complete. You already noted one w/a
that is applied by the BIOS (and cannot be applied by the kernel). If
this was intended to be a tool that reported whether the system had all
known workarounds, it should be kernel independent. (Atm the igt goal is
simply to be a regression test that the kernel w/a stick.)

> +static void wait_gpu(void)
> +{
> +       int fd = drm_open_driver(DRIVER_INTEL);
> +       gem_quiescent_gpu(fd);
> +       close(fd);
> +}

> +static int mmio_workarounds_fail_count(struct intel_wa_reg *wa_regs, int num_wa_regs)
> +{
> +       int i, fail_count = 0;
> +
> +       if (!num_wa_regs)
> +               return 0;
> +
> +       /*
> +        * There is a small delay after coming out of rc6 before the correct
> +        * render context values get loaded by the hardware (bdw,chv).
> +        * This here ensures that we have the correct context loaded before
> +        * we start to read values
> +        */
> +       wait_gpu();

Hmm. So my goal for gem_quiescent_gpu() is not to issue a new request,
i.e. if the hw is idle, it will remain idle and not necessarily start a
new context (or kick rc6).

If we need an active gpu context whilst doing the mmio, kick off a
spinbatch here. (We should augment that to allow us to wait until it has
started). Then kill the spinbatch after the reads.

Otherwise we just have the problem we had before on bxt where we did the
mmio long before the gpu had started.
-Chris
diff mbox

Patch

diff --git a/tests/gem_workarounds.c b/tests/gem_workarounds.c
index 7b99961..dbcf1f3 100644
--- a/tests/gem_workarounds.c
+++ b/tests/gem_workarounds.c
@@ -28,6 +28,7 @@ 
 #include "igt.h"
 
 #include <fcntl.h>
+#include <ctype.h>
 
 #define PAGE_SIZE 4096
 #define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
@@ -62,8 +63,21 @@  static struct write_only_list {
 	 */
 };
 
-static struct intel_wa_reg *wa_regs;
-static int num_wa_regs;
+static struct intel_wa_reg *ctx_wa_regs;
+static int num_ctx_wa_regs;
+
+static struct intel_wa_reg *mmio_wa_regs;
+static int num_mmio_wa_regs;
+
+static struct intel_wa_reg *whitelist_wa_regs;
+static int num_whitelist_wa_regs;
+
+static void wait_gpu(void)
+{
+	int fd = drm_open_driver(DRIVER_INTEL);
+	gem_quiescent_gpu(fd);
+	close(fd);
+}
 
 static bool write_only(const uint32_t addr)
 {
@@ -82,7 +96,7 @@  static bool write_only(const uint32_t addr)
 
 #define MI_STORE_REGISTER_MEM (0x24 << 23)
 
-static int workaround_fail_count(int fd, uint32_t ctx)
+static int ctx_workarounds_fail_count(int fd, uint32_t ctx)
 {
 	struct drm_i915_gem_exec_object2 obj[2];
 	struct drm_i915_gem_relocation_entry *reloc;
@@ -91,13 +105,16 @@  static int workaround_fail_count(int fd, uint32_t ctx)
 	uint32_t *base, *out;
 	int fail_count = 0;
 
-	reloc = calloc(num_wa_regs, sizeof(*reloc));
+	if (!num_ctx_wa_regs)
+		return 0;
+
+	reloc = calloc(num_ctx_wa_regs, sizeof(*reloc));
 	igt_assert(reloc);
 
-	result_sz = 4 * num_wa_regs;
+	result_sz = 4 * num_ctx_wa_regs;
 	result_sz = PAGE_ALIGN(result_sz);
 
-	batch_sz = 16 * num_wa_regs + 4;
+	batch_sz = 16 * num_ctx_wa_regs + 4;
 	batch_sz = PAGE_ALIGN(batch_sz);
 
 	memset(obj, 0, sizeof(obj));
@@ -105,12 +122,12 @@  static int workaround_fail_count(int fd, uint32_t ctx)
 	gem_set_caching(fd, obj[0].handle, I915_CACHING_CACHED);
 	obj[1].handle = gem_create(fd, batch_sz);
 	obj[1].relocs_ptr = to_user_pointer(reloc);
-	obj[1].relocation_count = num_wa_regs;
+	obj[1].relocation_count = num_ctx_wa_regs;
 
 	out = base = gem_mmap__cpu(fd, obj[1].handle, 0, batch_sz, PROT_WRITE);
-	for (int i = 0; i < num_wa_regs; i++) {
+	for (int i = 0; i < num_ctx_wa_regs; i++) {
 		*out++ = MI_STORE_REGISTER_MEM | ((gen >= 8 ? 4 : 2) - 2);
-		*out++ = wa_regs[i].addr;
+		*out++ = ctx_wa_regs[i].addr;
 		reloc[i].target_handle = obj[0].handle;
 		reloc[i].offset = (out - base) * sizeof(*out);
 		reloc[i].delta = i * sizeof(uint32_t);
@@ -134,20 +151,20 @@  static int workaround_fail_count(int fd, uint32_t ctx)
 	igt_debug("Address\tval\t\tmask\t\tread\t\tresult\n");
 
 	out = gem_mmap__cpu(fd, obj[0].handle, 0, result_sz, PROT_READ);
-	for (int i = 0; i < num_wa_regs; i++) {
+	for (int i = 0; i < num_ctx_wa_regs; i++) {
 		const bool ok =
-			(wa_regs[i].value & wa_regs[i].mask) ==
-			(out[i] & wa_regs[i].mask);
+			(ctx_wa_regs[i].value & ctx_wa_regs[i].mask) ==
+			(out[i] & ctx_wa_regs[i].mask);
 		char buf[80];
 
 		snprintf(buf, sizeof(buf),
 			 "0x%05X\t0x%08X\t0x%08X\t0x%08X",
-			 wa_regs[i].addr, wa_regs[i].value, wa_regs[i].mask,
+			 ctx_wa_regs[i].addr, ctx_wa_regs[i].value, ctx_wa_regs[i].mask,
 			 out[i]);
 
 		if (ok) {
 			igt_debug("%s\tOK\n", buf);
-		} else if (write_only(wa_regs[i].addr)) {
+		} else if (write_only(ctx_wa_regs[i].addr)) {
 			igt_debug("%s\tIGNORED (w/o)\n", buf);
 		} else {
 			igt_warn("%s\tFAIL\n", buf);
@@ -163,6 +180,57 @@  static int workaround_fail_count(int fd, uint32_t ctx)
 	return fail_count;
 }
 
+static int mmio_workarounds_fail_count(struct intel_wa_reg *wa_regs, int num_wa_regs)
+{
+	int i, fail_count = 0;
+
+	if (!num_wa_regs)
+		return 0;
+
+	/*
+	 * There is a small delay after coming out of rc6 before the correct
+	 * render context values get loaded by the hardware (bdw,chv).
+	 * This here ensures that we have the correct context loaded before
+	 * we start to read values
+	 */
+	wait_gpu();
+
+	igt_debug("Address\tval\t\tmask\t\tread\t\tresult\n");
+
+	for (i = 0; i < num_wa_regs; ++i) {
+		const uint32_t val = intel_register_read(wa_regs[i].addr);
+		const bool ok = (wa_regs[i].value & wa_regs[i].mask) ==
+			(val & wa_regs[i].mask);
+
+		igt_debug("0x%05X\t0x%08X\t0x%08X\t0x%08X\t%s\n",
+			  wa_regs[i].addr, wa_regs[i].value,
+			  wa_regs[i].mask, val, ok ? "OK" : "FAIL");
+
+		if (write_only(wa_regs[i].addr))
+			continue;
+
+		if (!ok) {
+			igt_warn("0x%05X\t0x%08X\t0x%08X\t0x%08X\t%s\n",
+				 wa_regs[i].addr, wa_regs[i].value,
+				 wa_regs[i].mask, val, ok ? "OK" : "FAIL");
+			fail_count++;
+		}
+	}
+
+	return fail_count;
+}
+
+static int workarounds_fail_count(int fd, uint32_t ctx)
+{
+	int fail_count = 0;
+
+	fail_count += ctx_workarounds_fail_count(fd, ctx);
+	fail_count += mmio_workarounds_fail_count(mmio_wa_regs, num_mmio_wa_regs);
+	fail_count += mmio_workarounds_fail_count(whitelist_wa_regs, num_whitelist_wa_regs);
+
+	return fail_count;
+}
+
 static int reopen(int fd)
 {
 	char path[256];
@@ -185,7 +253,7 @@  static void check_workarounds(int fd, enum operation op, unsigned int flags)
 	if (flags & CONTEXT)
 		ctx = gem_context_create(fd);
 
-	igt_assert_eq(workaround_fail_count(fd, ctx), 0);
+	igt_assert_eq(workarounds_fail_count(fd, ctx), 0);
 
 	switch (op) {
 	case GPU_RESET:
@@ -209,7 +277,7 @@  static void check_workarounds(int fd, enum operation op, unsigned int flags)
 		igt_assert(0);
 	}
 
-	igt_assert_eq(workaround_fail_count(fd, ctx), 0);
+	igt_assert_eq(workarounds_fail_count(fd, ctx), 0);
 
 	if (flags & CONTEXT)
 		gem_context_destroy(fd, ctx);
@@ -217,6 +285,62 @@  static void check_workarounds(int fd, enum operation op, unsigned int flags)
 		close(fd);
 }
 
+static bool is_empty(const char *s)
+{
+	while (*s != '\0') {
+		if (!isspace(*s))
+			return false;
+		s++;
+	}
+
+	return true;
+}
+
+static char *skip_preamble(char *s, const char *preamble)
+{
+	while (*s == *preamble) {
+		s++;
+		preamble++;
+	}
+
+	return s;
+}
+
+static void read_workarounds(FILE *file, const char *preamble, struct intel_wa_reg **regs, int *num)
+{
+	char *header, *line = NULL;
+	size_t line_size;
+	struct intel_wa_reg *wa_regs;
+	int num_wa_regs;
+	int i = 0;
+
+	igt_assert(getline(&line, &line_size, file) > 0);
+	igt_debug("i915_wa_registers: %s", line);
+	header = skip_preamble(line, preamble);
+	sscanf(header, " workarounds applied: %d", &num_wa_regs);
+
+	wa_regs = malloc(num_wa_regs * sizeof(*wa_regs));
+
+	while (getline(&line, &line_size, file) > 0) {
+		if (is_empty(line))
+			break;
+
+		igt_debug("%s", line);
+		if (sscanf(line, "0x%X: 0x%08X, mask: 0x%08X",
+			   &wa_regs[i].addr,
+			   &wa_regs[i].value,
+			   &wa_regs[i].mask) == 3)
+			i++;
+	}
+
+	igt_assert_lte(i, num_wa_regs);
+
+	*regs = wa_regs;
+	*num = num_wa_regs;
+
+	free(line);
+}
+
 igt_main
 {
 	int device = -1;
@@ -241,39 +365,28 @@  igt_main
 	}, *m;
 
 	igt_fixture {
+		struct pci_device *pci_dev;
 		FILE *file;
-		char *line = NULL;
-		size_t line_size;
-		int i, fd;
+		int fd;
 
 		device = drm_open_driver(DRIVER_INTEL);
 		igt_require_gem(device);
 
 		gen = intel_gen(intel_get_drm_devid(device));
 
+		pci_dev = intel_get_pci_device();
+		igt_require(pci_dev);
+
+		intel_register_access_init(pci_dev, 0, device);
+
 		fd = igt_debugfs_open(device, "i915_wa_registers", O_RDONLY);
 		file = fdopen(fd, "r");
-		igt_assert(getline(&line, &line_size, file) > 0);
-		igt_debug("i915_wa_registers: %s", line);
-		sscanf(line, "Workarounds applied: %d", &num_wa_regs);
-		igt_require(num_wa_regs > 0);
-
-		wa_regs = malloc(num_wa_regs * sizeof(*wa_regs));
-		igt_assert(wa_regs);
-
-		i = 0;
-		while (getline(&line, &line_size, file) > 0) {
-			igt_debug("%s", line);
-			if (sscanf(line, "0x%X: 0x%08X, mask: 0x%08X",
-				   &wa_regs[i].addr,
-				   &wa_regs[i].value,
-				   &wa_regs[i].mask) == 3)
-				i++;
-		}
 
-		igt_assert_lte(i, num_wa_regs);
+		read_workarounds(file, "Context", &ctx_wa_regs, &num_ctx_wa_regs);
+		read_workarounds(file, "MMIO", &mmio_wa_regs, &num_mmio_wa_regs);
+		read_workarounds(file, "Whitelist", &whitelist_wa_regs, &num_whitelist_wa_regs);
+		igt_require(num_ctx_wa_regs + num_mmio_wa_regs + num_whitelist_wa_regs > 0);
 
-		free(line);
 		fclose(file);
 		close(fd);
 	}
@@ -284,4 +397,9 @@  igt_main
 				check_workarounds(device, op->op, m->flags);
 		}
 	}
+
+	igt_fixture {
+		free(ctx_wa_regs);
+		free(mmio_wa_regs);
+	}
 }