diff mbox

[i-g-t,v2,2/2] tests/gem_mmap_gtt: add huge BO test

Message ID 1428490312.7407.10.camel@jlahtine-mobl1 (mailing list archive)
State New, archived
Headers show

Commit Message

Joonas Lahtinen April 8, 2015, 10:51 a.m. UTC
Add a straightforward test that allocates a BO that is bigger than
the mappable aperture (currently by 1 page), and tests mmap access to
it by the CPU directly and through the GTT in sequence.

Currently the GTT access is expected to fail gracefully, because the
kernel attempts to pin objects completely into the GTT for mmap
access. Once partial view support is merged into the kernel, the test
should pass for all parts.

v2:
- Corrected BO domain handling (Chris Wilson)
- Check again after GTT access for added paranoia (Chris Wilson)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 tests/gem_mmap_gtt.c | 104 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)

Comments

Chris Wilson April 8, 2015, 11 a.m. UTC | #1
On Wed, Apr 08, 2015 at 01:51:52PM +0300, Joonas Lahtinen wrote:
> +	/* Test read/write to first/last page through CPU after GTT writes.
> +	 * Require that previous GTT written values still exist.
> +	 */
> +	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
> +				PROT_READ | PROT_WRITE);
> +	if (!ptr_cpu) {
> +		igt_warn("Not enough free memory to complete huge BO test!\n");
> +		goto out;
> +	}
> +
> +	set_domain_cpu(fd, bo);
> +
> +	igt_assert(memcmp(ptr_cpu              , gtt_pattern, PAGE_SIZE) == 0);
> +	igt_assert(memcmp(ptr_cpu + last_offset, gtt_pattern, PAGE_SIZE) == 0);
> +
> +	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_cpu              , cpu_pattern, PAGE_SIZE) == 0);
> +	igt_assert(memcmp(ptr_cpu + last_offset, gtt_pattern, PAGE_SIZE) == 0);
> +
> +	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
> +	igt_assert(memcmp(ptr_cpu              , cpu_pattern, PAGE_SIZE) == 0);
> +	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);

On second thought I think this is better done using pread, as on non-llc
platforms this will incur a second gigantic clflush. Otherwise lgtm.
-Chris
diff mbox

Patch

diff --git a/tests/gem_mmap_gtt.c b/tests/gem_mmap_gtt.c
index d2803d7..c613a41 100644
--- a/tests/gem_mmap_gtt.c
+++ b/tests/gem_mmap_gtt.c
@@ -41,6 +41,10 @@ 
 #include "drmtest.h"
 #include "igt_debugfs.h"
 
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 static int OBJECT_SIZE = 16*1024*1024;
 
 static void set_domain_gtt(int fd, uint32_t handle)
@@ -263,6 +267,104 @@  test_write_gtt(int fd)
 }
 
 static void
+test_huge_bo(int fd)
+{
+	uint32_t bo;
+	char *ptr_cpu;
+	char *ptr_gtt;
+	char *cpu_pattern;
+	char *gtt_pattern;
+	uint64_t mappable_aperture_pages = gem_mappable_aperture_size() /
+					   PAGE_SIZE;
+	uint64_t huge_object_size = (mappable_aperture_pages + 1) * PAGE_SIZE;
+	uint64_t last_offset = huge_object_size - PAGE_SIZE;
+
+	cpu_pattern = malloc(PAGE_SIZE);
+	gtt_pattern = malloc(PAGE_SIZE);
+	igt_assert(cpu_pattern && gtt_pattern);
+	memset(cpu_pattern,  0xaa, PAGE_SIZE);
+	memset(gtt_pattern, ~0xaa, PAGE_SIZE);
+
+	bo = gem_create(fd, huge_object_size);
+
+	/* Test read/write to first/last page with CPU. */
+	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
+				PROT_READ | PROT_WRITE);
+	if (!ptr_cpu) {
+		igt_warn("Not enough free memory to begin huge BO test!\n");
+		goto out;
+	}
+
+	set_domain_cpu(fd, bo);
+
+	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);
+
+	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);
+
+	munmap(ptr_cpu, huge_object_size);
+	ptr_cpu = NULL;
+
+	/* Test read/write to first/last page through GTT after CPU writes.
+	 * Require that previous CPU written values still exist.
+	 */
+	ptr_gtt = gem_mmap__gtt(fd, bo, huge_object_size,
+			        PROT_READ | PROT_WRITE);
+	if (!ptr_gtt) {
+		igt_debug("Huge BO GTT mapping not supported!\n");
+		goto out;
+	}
+
+	set_domain_gtt(fd, bo);
+
+	igt_assert(memcmp(ptr_gtt              , cpu_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	memcpy(ptr_gtt, gtt_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_gtt              , gtt_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_gtt + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	memcpy(ptr_gtt + last_offset, gtt_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_gtt              , gtt_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_gtt + last_offset, gtt_pattern, PAGE_SIZE) == 0);
+
+	munmap(ptr_gtt, huge_object_size);
+	ptr_gtt = NULL;
+
+	/* Test read/write to first/last page through CPU after GTT writes.
+	 * Require that previous GTT written values still exist.
+	 */
+	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
+				PROT_READ | PROT_WRITE);
+	if (!ptr_cpu) {
+		igt_warn("Not enough free memory to complete huge BO test!\n");
+		goto out;
+	}
+
+	set_domain_cpu(fd, bo);
+
+	igt_assert(memcmp(ptr_cpu              , gtt_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_cpu + last_offset, gtt_pattern, PAGE_SIZE) == 0);
+
+	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_cpu              , cpu_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_cpu + last_offset, gtt_pattern, PAGE_SIZE) == 0);
+
+	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
+	igt_assert(memcmp(ptr_cpu              , cpu_pattern, PAGE_SIZE) == 0);
+	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);
+
+	munmap(ptr_cpu, huge_object_size);
+	ptr_cpu = NULL;
+out:
+	gem_close(fd, bo);
+	free(cpu_pattern);
+}
+
+static void
 test_read(int fd)
 {
 	void *dst;
@@ -400,6 +502,8 @@  igt_main
 		run_without_prefault(fd, test_write_gtt);
 	igt_subtest("write-cpu-read-gtt")
 		test_write_cpu_read_gtt(fd);
+	igt_subtest("huge-bo")
+		test_huge_bo(fd);
 
 	igt_fixture
 		close(fd);