[3/7] prime_mmap: Add basic tests to write in a bo using CPU

Message ID 1439422160-20148-8-git-send-email-tiago.vignatti@intel.com (mailing list archive)
State New, archived

Commit Message

Tiago Vignatti Aug. 12, 2015, 11:29 p.m. UTC
This patch adds test_correct_cpu_write, which maps the texture buffer through a
prime fd and then writes directly to it using the CPU. It stresses the driver,
which must guarantee cache synchronization among the different domains.

This patch also adds test_forked_cpu_write, which creates the GEM bo in one
process and passes the prime handle to another process, which in turn uses the
handle only to map and write. Roughly speaking, this test simulates the
Chrome OS architecture, where the web content ("unprivileged process") maps
and CPU-draws a buffer that was previously allocated in the GPU process
("privileged process").

This requires kernel modifications (Daniel Thompson's "drm: prime: Honour
O_RDWR during prime-handle-to-fd").

Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 lib/ioctl_wrappers.c |  5 +++-
 tests/prime_mmap.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 1 deletion(-)
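
The fork()-based child in test_forked_cpu_write stands in for a real
cross-process handoff: in the Chrome OS case the prime fd would instead travel
from the GPU process to the unprivileged renderer over a Unix domain socket. A
minimal, hypothetical sketch of the sending side (standard SCM_RIGHTS fd
passing; illustration only, not part of this patch):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_prime_fd(int sock, int dma_buf_fd)
{
	char data = 0;
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	/* Union guarantees the control buffer is suitably aligned. */
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &dma_buf_fd, sizeof(int));

	/* The receiver recvmsg()s the fd and can then mmap() it directly. */
	return sendmsg(sock, &msg, 0);
}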

Comments

Daniel Vetter Aug. 13, 2015, 7:01 a.m. UTC | #1
On Wed, Aug 12, 2015 at 08:29:16PM -0300, Tiago Vignatti wrote:
> This patch adds test_correct_cpu_write, which maps the texture buffer through a
> prime fd and then writes directly to it using the CPU. It stresses the driver,
> which must guarantee cache synchronization among the different domains.
> 
> This patch also adds test_forked_cpu_write, which creates the GEM bo in one
> process and passes the prime handle to another process, which in turn uses the
> handle only to map and write. Roughly speaking, this test simulates the
> Chrome OS architecture, where the web content ("unprivileged process") maps
> and CPU-draws a buffer that was previously allocated in the GPU process
> ("privileged process").
> 
> This requires kernel modifications (Daniel Thompson's "drm: prime: Honour
> O_RDWR during prime-handle-to-fd").
> 
> Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>

Squash with previous patch?
> ---
>  lib/ioctl_wrappers.c |  5 +++-
>  tests/prime_mmap.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 69 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
> index 53bd635..941fa66 100644
> --- a/lib/ioctl_wrappers.c
> +++ b/lib/ioctl_wrappers.c
> @@ -1125,6 +1125,9 @@ void gem_require_ring(int fd, int ring_id)
>  
>  /* prime */
>  
> +#ifndef DRM_RDWR
> +#define DRM_RDWR O_RDWR
> +#endif
>  /**
>   * prime_handle_to_fd:
>   * @fd: open i915 drm file descriptor
> @@ -1142,7 +1145,7 @@ int prime_handle_to_fd(int fd, uint32_t handle)
>  
>  	memset(&args, 0, sizeof(args));
>  	args.handle = handle;
> -	args.flags = DRM_CLOEXEC;
> +	args.flags = DRM_CLOEXEC | DRM_RDWR;

This needs to be optional, otherwise all the existing prime tests start
falling over on older kernels. We probably need a
prime_handle_to_fd_with_mmap, which does an igt_skip if it fails.
-Daniel
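
A minimal sketch of such a helper, assuming the name from the suggestion
above and IGT's igt_skip_on() (a sketch, not the final implementation):

int prime_handle_to_fd_with_mmap(int fd, uint32_t handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.flags = DRM_CLOEXEC | DRM_RDWR;
	args.fd = -1;

	/* Older kernels reject DRM_RDWR: skip instead of failing. */
	igt_skip_on(drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) != 0);

	return args.fd;
}

Existing callers would keep prime_handle_to_fd() unchanged, so tests that
don't need a writable mapping still run on older kernels.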

Tiago Vignatti Aug. 13, 2015, 2:26 p.m. UTC | #2
On 08/13/2015 04:01 AM, Daniel Vetter wrote:
> On Wed, Aug 12, 2015 at 08:29:16PM -0300, Tiago Vignatti wrote:
>> This patch adds test_correct_cpu_write, which maps the texture buffer through a
>> prime fd and then writes directly to it using the CPU. It stresses the driver,
>> which must guarantee cache synchronization among the different domains.
>>
>> This patch also adds test_forked_cpu_write, which creates the GEM bo in one
>> process and passes the prime handle to another process, which in turn uses the
>> handle only to map and write. Roughly speaking, this test simulates the
>> Chrome OS architecture, where the web content ("unprivileged process") maps
>> and CPU-draws a buffer that was previously allocated in the GPU process
>> ("privileged process").
>>
>> This requires kernel modifications (Daniel Thompson's "drm: prime: Honour
>> O_RDWR during prime-handle-to-fd").
>>
>> Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
>
> Squash with previous patch?

Why? If the whole point is to decrease the number of patches, then I'd
prefer to squash 2/7 into 1/7 (although they're from different authors
and it would be nice to keep each author's changes separate). This patch
introduces writing to an mmap'ed dma-buf fd, a concept that is still
under debate and requires a kernel counterpart, which is why I preferred
to keep it separate.


>> ---
>>   lib/ioctl_wrappers.c |  5 +++-
>>   tests/prime_mmap.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>   2 files changed, 69 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
>> index 53bd635..941fa66 100644
>> --- a/lib/ioctl_wrappers.c
>> +++ b/lib/ioctl_wrappers.c
>> @@ -1125,6 +1125,9 @@ void gem_require_ring(int fd, int ring_id)
>>
>>   /* prime */
>>
>> +#ifndef DRM_RDWR
>> +#define DRM_RDWR O_RDWR
>> +#endif
>>   /**
>>    * prime_handle_to_fd:
>>    * @fd: open i915 drm file descriptor
>> @@ -1142,7 +1145,7 @@ int prime_handle_to_fd(int fd, uint32_t handle)
>>
>>   	memset(&args, 0, sizeof(args));
>>   	args.handle = handle;
>> -	args.flags = DRM_CLOEXEC;
>> +	args.flags = DRM_CLOEXEC | DRM_RDWR;
>
> This needs to be optional, otherwise all the existing prime tests start
> falling over on older kernels. We probably need a
> prime_handle_to_fd_with_mmap, which does an igt_skip if it fails.

True. Thank you.


Daniel Vetter Aug. 13, 2015, 2:48 p.m. UTC | #3
On Thu, Aug 13, 2015 at 11:26:57AM -0300, Tiago Vignatti wrote:
> On 08/13/2015 04:01 AM, Daniel Vetter wrote:
> >On Wed, Aug 12, 2015 at 08:29:16PM -0300, Tiago Vignatti wrote:
> >>This patch adds test_correct_cpu_write, which maps the texture buffer through a
> >>prime fd and then writes directly to it using the CPU. It stresses the driver,
> >>which must guarantee cache synchronization among the different domains.
> >>
> >>This patch also adds test_forked_cpu_write, which creates the GEM bo in one
> >>process and passes the prime handle to another process, which in turn uses the
> >>handle only to map and write. Roughly speaking, this test simulates the
> >>Chrome OS architecture, where the web content ("unprivileged process") maps
> >>and CPU-draws a buffer that was previously allocated in the GPU process
> >>("privileged process").
> >>
> >>This requires kernel modifications (Daniel Thompson's "drm: prime: Honour
> >>O_RDWR during prime-handle-to-fd").
> >>
> >>Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
> >
> >Squash with previous patch?
> 
> Why? If the whole point is to decrease the number of patches, then I'd prefer
> to squash 2/7 into 1/7 (although they're from different authors and it would
> be nice to keep each author's changes separate). This patch introduces
> writing to an mmap'ed dma-buf fd, a concept that is still under debate and
> requires a kernel counterpart, which is why I preferred to keep it separate.

Replied to the wrong patch, I meant merging patches 1&2 ofc ;-)
-Daniel

Patch

diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 53bd635..941fa66 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -1125,6 +1125,9 @@ void gem_require_ring(int fd, int ring_id)
 
 /* prime */
 
+#ifndef DRM_RDWR
+#define DRM_RDWR O_RDWR
+#endif
 /**
  * prime_handle_to_fd:
  * @fd: open i915 drm file descriptor
@@ -1142,7 +1145,7 @@ int prime_handle_to_fd(int fd, uint32_t handle)
 
 	memset(&args, 0, sizeof(args));
 	args.handle = handle;
-	args.flags = DRM_CLOEXEC;
+	args.flags = DRM_CLOEXEC | DRM_RDWR;
 	args.fd = -1;
 
 	do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
diff --git a/tests/prime_mmap.c b/tests/prime_mmap.c
index dc59e8f..ad91371 100644
--- a/tests/prime_mmap.c
+++ b/tests/prime_mmap.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Rob Bradford <rob at linux.intel.com>
+ *    Tiago Vignatti <tiago.vignatti at intel.com>
  *
  */
 
@@ -66,6 +67,12 @@ fill_bo(uint32_t handle, size_t size)
 }
 
 static void
+fill_bo_cpu(char *ptr)
+{
+	memcpy(ptr, pattern, sizeof(pattern));
+}
+
+static void
 test_correct(void)
 {
 	int dma_buf_fd;
@@ -180,6 +187,62 @@ test_forked(void)
 	gem_close(fd, handle);
 }
 
+/* Test CPU writes. This has a rather big implication for the driver, which
+ * must guarantee cache synchronization when the bo is written using the CPU. */
+static void
+test_correct_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	/* Check correctness of map using write protection (PROT_WRITE) */
+	ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	/* Fill bo using CPU */
+	fill_bo_cpu(ptr);
+
+	/* Check pattern correctness */
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+/* Map from another process and then write using the CPU. */
+static void
+test_forked_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	igt_fork(childno, 1) {
+		ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+		igt_assert(ptr != MAP_FAILED);
+		fill_bo_cpu(ptr);
+
+		igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+		munmap(ptr, BO_SIZE);
+		close(dma_buf_fd);
+	}
+	close(dma_buf_fd);
+	igt_waitchildren();
+	gem_close(fd, handle);
+}
+
 static void
 test_refcounting(void)
 {
@@ -346,6 +409,8 @@ igt_main
 		{ "test_map_unmap", test_map_unmap },
 		{ "test_reprime", test_reprime },
 		{ "test_forked", test_forked },
+		{ "test_correct_cpu_write", test_correct_cpu_write },
+		{ "test_forked_cpu_write", test_forked_cpu_write },
 		{ "test_refcounting", test_refcounting },
 		{ "test_dup", test_dup },
 		{ "test_errors", test_errors },