
[i-g-t,1/2] igt/gem_userptr: Exercise new PROBE | POPULATE flags

Message ID 20180615153250.29081-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson June 15, 2018, 3:32 p.m. UTC
Exercise the new API to probe that the userptr range is valid (backed by
struct pages, not raw pfn mappings) or to populate the userptr upon
creation (by calling get_user_pages() on the range).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: MichaƂ Winiarski <michal.winiarski@intel.com>
---
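A quick illustration for reviewers: a minimal sketch (an assumption
about the in-flight uapi, not a final interface) of how the PROBE flag
might be used from userspace. The flag value mirrors the
LOCAL_USERPTR_PROBE define in the test below, and create_userptr_probed()
is a hypothetical helper:

#include <errno.h>
#include <stdint.h>
#include <drm/i915_drm.h>
#include <xf86drm.h>

/* Assumed flag value, mirroring LOCAL_USERPTR_PROBE in the test */
#define LOCAL_I915_USERPTR_PROBE 0x2

/*
 * Hypothetical helper: create a userptr object with the proposed PROBE
 * flag so the ioctl fails up front (-EFAULT) if any page in the range
 * is not backed by struct pages.
 */
static int create_userptr_probed(int fd, void *ptr, uint64_t size,
				 uint32_t *handle)
{
	struct drm_i915_gem_userptr arg = {
		.user_ptr = (uintptr_t)ptr,
		.user_size = size,
		.flags = LOCAL_I915_USERPTR_PROBE,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -errno;

	*handle = arg.handle;
	return 0;
}

On kernels without the extension the ioctl rejects the unknown flag with
EINVAL, which is what the test's igt_require() keys off.
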
 tests/gem_userptr_blits.c | 140 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 140 insertions(+)

Patch

diff --git a/tests/gem_userptr_blits.c b/tests/gem_userptr_blits.c
index 7e3b6ef38..0c2bdf5b2 100644
--- a/tests/gem_userptr_blits.c
+++ b/tests/gem_userptr_blits.c
@@ -557,6 +557,140 @@  static int test_invalid_gtt_mapping(int fd)
 	return 0;
 }
 
+static void store_dword(int fd, uint32_t target,
+			uint32_t offset, uint32_t value)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t batch[16];
+	int i;
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(obj);
+	execbuf.buffer_count = ARRAY_SIZE(obj);
+	execbuf.flags = 0;
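+	/* MI_STORE_DWORD_IMM requires a secure batch on pre-gen6 */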
+	if (gen < 6)
+		execbuf.flags |= I915_EXEC_SECURE;
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = target;
+	obj[1].handle = gem_create(fd, 4096);
+
+	memset(&reloc, 0, sizeof(reloc));
+	reloc.target_handle = obj[0].handle;
+	reloc.presumed_offset = 0;
+	reloc.offset = sizeof(uint32_t);
+	reloc.delta = offset;
+	reloc.read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc.write_domain = I915_GEM_DOMAIN_RENDER;
+	obj[1].relocs_ptr = to_user_pointer(&reloc);
+	obj[1].relocation_count = 1;
+
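+	/*
+	 * MI_STORE_DWORD_IMM encoding varies by gen: gen8+ takes a
+	 * 64-bit address, gen4-7 insert an MBZ dword before the
+	 * address (moving the reloc to the next dword), and pre-gen4
+	 * uses a one-dword-shorter command.
+	 */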
+	i = 0;
+	batch[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+	if (gen >= 8) {
+		batch[++i] = offset;
+		batch[++i] = 0;
+	} else if (gen >= 4) {
+		batch[++i] = 0;
+		batch[++i] = offset;
+		reloc.offset += sizeof(uint32_t);
+	} else {
+		batch[i]--;
+		batch[++i] = offset;
+	}
+	batch[++i] = value;
+	batch[++i] = MI_BATCH_BUFFER_END;
+	gem_write(fd, obj[1].handle, 0, batch, sizeof(batch));
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, obj[1].handle);
+}
+
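+/* Flags for the proposed userptr PROBE/POPULATE uAPI extension */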
+#define LOCAL_USERPTR_PROBE 0x2
+#define LOCAL_USERPTR_POPULATE 0x4
+static void test_probe(int fd, unsigned int flags)
+{
+#define N_PAGES 5
+	struct drm_i915_gem_mmap_gtt mmap_gtt;
+	uint32_t handle;
+
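+	/*
+	 * A range that wraps the address space must be rejected with
+	 * -EFAULT; kernels without the new flags return -EINVAL here,
+	 * which skips the subtest.
+	 */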
+	igt_require(__gem_userptr(fd,
+				  (void *)-PAGE_SIZE, 2*PAGE_SIZE, 0,
+				  flags, &handle) == -EFAULT);
+
+	/*
+	 * We allocate 5 pages and apply various combinations of
+	 * unmap and remap-as-GTT to them. Then we try to create a
+	 * userptr from the middle 3 pages and check whether it
+	 * unexpectedly succeeds or fails.
+	 */
+	memset(&mmap_gtt, 0, sizeof(mmap_gtt));
+	mmap_gtt.handle = gem_create(fd, PAGE_SIZE);
+	do_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt);
+
+	for (unsigned long pass = 0; pass < 4 * 4 * 4 * 4 * 4; pass++) {
+		int expected = 0;
+		void *ptr;
+
+		ptr = mmap(NULL, N_PAGES * PAGE_SIZE,
+			   PROT_READ | PROT_WRITE,
+			   MAP_SHARED | MAP_ANONYMOUS,
+			   -1, 0);
+		igt_assert(ptr != MAP_FAILED);
+
+		for (int page = 0; page < N_PAGES; page++) {
+			int mode = (pass >> (2 * page)) & 3;
+			void *fixed = ptr + page * PAGE_SIZE;
+
+			switch (mode) {
+			default:
+			case 0:
+				break;
+
+			case 1:
+				munmap(fixed, PAGE_SIZE);
+				if (page >= 1 && page <= 3)
+					expected = -EFAULT;
+				break;
+
+			case 2:
+				fixed = mmap(fixed, PAGE_SIZE,
+					     PROT_READ | PROT_WRITE,
+					     MAP_SHARED | MAP_FIXED,
+					     fd, mmap_gtt.offset);
+				igt_assert(fixed != MAP_FAILED);
+				if (page >= 1 && page <= 3)
+					expected = -EFAULT;
+				break;
+			}
+		}
+
+		errno = 0;
+		handle = 0;
+		igt_assert_eq(__gem_userptr(fd, ptr + PAGE_SIZE, 3*PAGE_SIZE,
+					    0, flags, &handle),
+			      expected);
+		if (handle) {
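+			/* GPU-write each page, then verify via the CPU mmap */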
+			for (int page = 0; page < 3; page++)
+				store_dword(fd, handle, page*PAGE_SIZE, handle);
+
+			gem_sync(fd, handle);
+
+			for (int page = 1; page <= 3; page++)
+				igt_assert_eq(*(uint32_t *)(ptr + page*PAGE_SIZE),
+					      handle);
+
+			gem_close(fd, handle);
+		}
+
+		munmap(ptr, N_PAGES * PAGE_SIZE);
+	}
+
+	gem_close(fd, mmap_gtt.handle);
+#undef N_PAGES
+}
+
 #define PE_GTT_MAP 0x1
 #define PE_BUSY 0x2
 static void test_process_exit(int fd, int flags)
@@ -1464,6 +1598,12 @@  int main(int argc, char **argv)
 		igt_subtest("invalid-gtt-mapping")
 			test_invalid_gtt_mapping(fd);
 
+		igt_subtest("probe")
+			test_probe(fd, LOCAL_USERPTR_PROBE);
+
+		igt_subtest("populate")
+			test_probe(fd, LOCAL_USERPTR_POPULATE);
+
 		igt_subtest("forked-access")
 			test_forked_access(fd);