
[i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)

Message ID 1448386923-18141-1-git-send-email-marius.c.vlad@intel.com (mailing list archive)
State New, archived

Commit Message

Marius Vlad Nov. 24, 2015, 5:42 p.m. UTC
From: Marius Vlad <marius.c.vlad@intel.com>

Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
---
 tests/pm_rpm.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)

Comments

Imre Deak Nov. 24, 2015, 10:57 p.m. UTC | #1
Hi,

thanks for the patch. Looks OK in general; I have a few comments below.

On Tue, 2015-11-24 at 19:42 +0200, marius.c.vlad@intel.com wrote:
> From: Marius Vlad <marius.c.vlad@intel.com>
> 
> Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
> ---
>  tests/pm_rpm.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 90 insertions(+)
> 
> diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> index c4fb19c..86d16ad 100644
> --- a/tests/pm_rpm.c
> +++ b/tests/pm_rpm.c
> @@ -1729,6 +1729,90 @@ static void planes_subtest(bool universal, bool dpms)
>  	}
>  }
>  
> +static void pm_test_tiling(void)
> +{
> +	uint32_t handle;
> +	uint8_t *gem_buf;
> +	uint32_t i, tiling_modes[3] = {
> +		I915_TILING_NONE,
> +		I915_TILING_X,
> +		I915_TILING_Y,
> +	};
> +	uint32_t ti, sw, j;
> +	uint32_t obj_size = (8 * 1024 * 1024);
> +
> +	handle = gem_create(drm_fd, obj_size);
> +
> +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		if (tiling_modes[i] == 0) {

Better not to hardcode 0 here; compare against I915_TILING_NONE instead.

> +			gem_set_tiling(drm_fd, handle, tiling_modes[i], 0);

You can just pass the same stride always, flattening the if-else.
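
For illustration, the loop body flattened along those lines might look
roughly like this (a sketch only, not the submitted patch; it assumes the
kernel ignores the stride argument for I915_TILING_NONE):

	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
		disable_all_screens_and_wait(&ms_data);

		/* assumption: the kernel forces the stride to 0 for
		 * I915_TILING_NONE, so 512 can be passed for every mode */
		gem_set_tiling(drm_fd, handle, tiling_modes[i], 512);

		gem_buf = gem_mmap__cpu(drm_fd, handle, 0, obj_size, PROT_WRITE);
		for (j = 0; j < obj_size; j++)
			gem_buf[j] = j & 0xff;
		igt_assert(munmap(gem_buf, obj_size) == 0);

		gem_get_tiling(drm_fd, handle, &ti, &sw);
		igt_assert(ti == tiling_modes[i]);

		enable_one_screen_and_wait(&ms_data);
	}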

> +
> +			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
> +					obj_size, PROT_WRITE);

Mapping to the CPU doesn't make a difference in this test case. On old
HW we could make sure that there will be an unbind during the IOCTL; for
that you need an alignment that isn't valid for the tiling mode. The
easiest way would be to map a few smaller-sized objects to the GGTT and
do a memset on each; I'm not sure if there is a more precise way. Also,
this should be done before calling gem_set_tiling() and
disable_all_screens_and_wait().
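
A rough sketch of that suggestion (hypothetical code, not part of the
review or the patch; the helper name, object count and size are made up,
and it assumes gem_mmap__gtt() from the IGT ioctl wrappers):

	/* Bind a few small objects into the GGTT by touching them through
	 * a GTT mapping, so that a later gem_set_tiling() with an alignment
	 * that is invalid for the new tiling mode has to unbind them during
	 * the ioctl. Call this before gem_set_tiling() and
	 * disable_all_screens_and_wait(). */
	static void bind_objects_to_ggtt(int fd, uint32_t *handles, int count,
					 uint32_t size)
	{
		uint8_t *buf;
		int i;

		for (i = 0; i < count; i++) {
			buf = gem_mmap__gtt(fd, handles[i], size, PROT_WRITE);
			memset(buf, 0, size);
			igt_assert(munmap(buf, size) == 0);
		}
	}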

> +
> +			for (j = 0; j < obj_size; j++)
> +				gem_buf[j] = j & 0xff;
> +
> +			igt_assert(munmap(gem_buf, obj_size) == 0);
> +
> +			gem_get_tiling(drm_fd, handle, &ti, &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +		} else {
> +			gem_set_tiling(drm_fd, handle, tiling_modes[i], 512);
> +			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
> +					obj_size, PROT_WRITE);
> +
> +			for (j = 0; j < obj_size; j++)
> +				gem_buf[j] = j & 0xff;
> +
> +			igt_assert(munmap(gem_buf, obj_size) == 0);
> +
> +			gem_get_tiling(drm_fd, handle, &ti, &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +		}
> +
> +		enable_one_screen_and_wait(&ms_data);
> +	}
> +
> +	gem_close(drm_fd, handle);
> +}
> +
> +static void pm_test_caching(void)
> +{
> +	uint32_t handle, got_caching, obj_size = (8 * 1024 * 1024);
> +	void *src_buf;
> +	uint32_t i, cache_levels[3] = {
> +		I915_CACHING_NONE,
> +		I915_CACHING_CACHED,
> +		I915_CACHING_DISPLAY,
> +	};
> +
> +	handle = gem_create(drm_fd, obj_size);
> +	src_buf = malloc(obj_size);
> +
> +	memset(src_buf, 0x65, obj_size);
> +
> +	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		gem_set_caching(drm_fd, handle, cache_levels[i]);
> +		gem_write(drm_fd, handle, 0, src_buf, obj_size);

Similarly to the above, we need to force an unbind here by mapping the
object to the GGTT and doing a memset on it before calling
disable_all_screens_and_wait(). We don't need a specific object
alignment here.
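
For illustration, a minimal sketch of that change inside the loop
(hypothetical; the actual follow-up revisions are not shown on this page,
and gem_mmap__gtt() from the IGT ioctl wrappers is assumed):

		uint8_t *gtt_buf;

		/* touch the object through a GTT mapping so it is bound
		 * into the GGTT before the device is runtime suspended;
		 * the set_caching ioctl below then has to unbind it */
		gtt_buf = gem_mmap__gtt(drm_fd, handle, obj_size, PROT_WRITE);
		memset(gtt_buf, 0, obj_size);
		igt_assert(munmap(gtt_buf, obj_size) == 0);

		disable_all_screens_and_wait(&ms_data);

		gem_set_caching(drm_fd, handle, cache_levels[i]);
		gem_write(drm_fd, handle, 0, src_buf, obj_size);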

> +
> +		got_caching = gem_get_caching(drm_fd, handle);
> +
> +		enable_one_screen_and_wait(&ms_data);
> +
> +		/* skip CACHING_DISPLAY, some platforms do not have it */
> +		if (i != 2)

Better not to hardcode the index here; compare against
I915_CACHING_DISPLAY instead.

> +			igt_assert(got_caching == cache_levels[i]);

You could make it more precise by requiring either CACHING_DISPLAY or
CACHING_NONE in this case.
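
A sketch of what that stricter check could look like (illustrative only;
it also drops the hardcoded index in favour of comparing against
I915_CACHING_DISPLAY):

		if (cache_levels[i] == I915_CACHING_DISPLAY)
			/* platforms without display caching are expected
			 * to fall back to I915_CACHING_NONE */
			igt_assert(got_caching == I915_CACHING_DISPLAY ||
				   got_caching == I915_CACHING_NONE);
		else
			igt_assert(got_caching == cache_levels[i]);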

--Imre

> +	}
> +
> +	free(src_buf);
> +	gem_close(drm_fd, handle);
> +}
> +
>  static void fences_subtest(bool dpms)
>  {
>  	int i;
> @@ -1927,6 +2011,12 @@ int main(int argc, char *argv[])
>  	igt_subtest("gem-execbuf-stress-extra-wait")
>  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
>  
> +	/* power-wake reference tests */
> +	igt_subtest("pm-tiling")
> +		pm_test_tiling();
> +	igt_subtest("pm-caching")
> +		pm_test_caching();
> +
>  	igt_fixture
>  		teardown_environment();
>
Marius Vlad Nov. 25, 2015, 5:16 p.m. UTC | #2
Second attempt using Imre's hints.
Marius Vlad Nov. 27, 2015, 6:08 p.m. UTC | #3
v4: re-bind the gem objects each time before calling
disable_all_screens_and_wait().

v3: Use smaller sizes when allocating gem objects for caching tests.

v2: use mmap to GTT instead of CPU and various style changes.

Reviewed-by: Imre Deak <imre.deak@intel.com>
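
The revised patch itself is not shown on this page; roughly, one
iteration of the tiling loop after those changes could be shaped like the
sketch below (hypothetical; handles[], gem_bufs[], gem_buf_count and
gem_buf_size are assumed names for an array of smaller objects):

	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
		/* re-bind the objects through GTT mappings first */
		for (j = 0; j < gem_buf_count; j++) {
			gem_bufs[j] = gem_mmap__gtt(drm_fd, handles[j],
						    gem_buf_size, PROT_WRITE);
			memset(gem_bufs[j], 0, gem_buf_size);
		}

		disable_all_screens_and_wait(&ms_data);

		for (j = 0; j < gem_buf_count; j++) {
			gem_set_tiling(drm_fd, handles[j], tiling_modes[i], 512);
			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
			igt_assert(ti == tiling_modes[i]);
		}

		for (j = 0; j < gem_buf_count; j++)
			igt_assert(munmap(gem_bufs[j], gem_buf_size) == 0);

		enable_one_screen_and_wait(&ms_data);
	}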

Patch

diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19c..86d16ad 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,90 @@ static void planes_subtest(bool universal, bool dpms)
 	}
 }
 
+static void pm_test_tiling(void)
+{
+	uint32_t handle;
+	uint8_t *gem_buf;
+	uint32_t i, tiling_modes[3] = {
+		I915_TILING_NONE,
+		I915_TILING_X,
+		I915_TILING_Y,
+	};
+	uint32_t ti, sw, j;
+	uint32_t obj_size = (8 * 1024 * 1024);
+
+	handle = gem_create(drm_fd, obj_size);
+
+	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		if (tiling_modes[i] == 0) {
+			gem_set_tiling(drm_fd, handle, tiling_modes[i], 0);
+
+			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
+					obj_size, PROT_WRITE);
+
+			for (j = 0; j < obj_size; j++)
+				gem_buf[j] = j & 0xff;
+
+			igt_assert(munmap(gem_buf, obj_size) == 0);
+
+			gem_get_tiling(drm_fd, handle, &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		} else {
+			gem_set_tiling(drm_fd, handle, tiling_modes[i], 512);
+			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
+					obj_size, PROT_WRITE);
+
+			for (j = 0; j < obj_size; j++)
+				gem_buf[j] = j & 0xff;
+
+			igt_assert(munmap(gem_buf, obj_size) == 0);
+
+			gem_get_tiling(drm_fd, handle, &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	gem_close(drm_fd, handle);
+}
+
+static void pm_test_caching(void)
+{
+	uint32_t handle, got_caching, obj_size = (8 * 1024 * 1024);
+	void *src_buf;
+	uint32_t i, cache_levels[3] = {
+		I915_CACHING_NONE,
+		I915_CACHING_CACHED,
+		I915_CACHING_DISPLAY,
+	};
+
+	handle = gem_create(drm_fd, obj_size);
+	src_buf = malloc(obj_size);
+
+	memset(src_buf, 0x65, obj_size);
+
+	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		gem_set_caching(drm_fd, handle, cache_levels[i]);
+		gem_write(drm_fd, handle, 0, src_buf, obj_size);
+
+		got_caching = gem_get_caching(drm_fd, handle);
+
+		enable_one_screen_and_wait(&ms_data);
+
+		/* skip CACHING_DISPLAY, some platforms do not have it */
+		if (i != 2)
+			igt_assert(got_caching == cache_levels[i]);
+	}
+
+	free(src_buf);
+	gem_close(drm_fd, handle);
+}
+
 static void fences_subtest(bool dpms)
 {
 	int i;
@@ -1927,6 +2011,12 @@ int main(int argc, char *argv[])
 	igt_subtest("gem-execbuf-stress-extra-wait")
 		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
 
+	/* power-wake reference tests */
+	igt_subtest("pm-tiling")
+		pm_test_tiling();
+	igt_subtest("pm-caching")
+		pm_test_caching();
+
 	igt_fixture
 		teardown_environment();