
[libdrm,3/3] intel: Support passing of explicit fencing from execbuf

Message ID: 20170127111202.17948-3-chris@chris-wilson.co.uk (mailing list archive)
State: New, archived

Commit Message

Chris Wilson Jan. 27, 2017, 11:12 a.m. UTC
Allow the caller to pass in an fd to an array of fences to control
serialisation of the execbuf in the kernel and on the GPU, and in return
allow creation of a fence fd for signaling the completion (and flushing)
of the batch. When the returned fence is signaled, all writes to the
buffers inside the batch will be complete and coherent for the CPU and
any other consumers. The returned fence is a sync_file object and can be
passed to other users (such as atomic modesetting, or other drivers).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 intel/intel_bufmgr.h     |  6 ++++++
 intel/intel_bufmgr_gem.c | 31 +++++++++++++++++++++++++++----
 2 files changed, 33 insertions(+), 4 deletions(-)
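
For reference, a minimal (hypothetical) user of the new entry point might look
like the sketch below. It is not part of the patch: the render-node path, the
trivial MI_BATCH_BUFFER_END batch and the complete lack of error handling are
assumptions made purely for illustration; the call itself follows the
drm_intel_gem_bo_fence_exec() signature added in intel_bufmgr.h. Compile
against libdrm_intel (pkg-config libdrm_intel).

/* Hypothetical example only -- not part of the patch. */
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <unistd.h>

#include <i915_drm.h>
#include <intel_bufmgr.h>

#define MI_BATCH_BUFFER_END	(0xA << 23)
#define MI_NOOP			0

int main(void)
{
	/* Assumed device path; any i915 render node will do. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
	drm_intel_bo *batch = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);

	/* A trivial batch: MI_BATCH_BUFFER_END padded to 8 bytes. */
	uint32_t cmds[2] = { MI_BATCH_BUFFER_END, MI_NOOP };
	drm_intel_bo_subdata(batch, 0, sizeof(cmds), cmds);

	/* No input fence (-1), but ask the kernel for an output fence. */
	int out_fence = -1;
	drm_intel_gem_bo_fence_exec(batch, ctx, sizeof(cmds),
				    -1, &out_fence, I915_EXEC_RENDER);

	/* The out fence is a sync_file fd: poll() blocks until it signals,
	 * i.e. until the batch has completed and its writes are coherent.
	 */
	struct pollfd pfd = { .fd = out_fence, .events = POLLIN };
	poll(&pfd, 1, -1);
	close(out_fence);

	drm_intel_bo_unreference(batch);
	drm_intel_gem_context_destroy(ctx);
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}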

Patch

diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index f43ee470..11579fbc 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -221,6 +221,12 @@  int drm_intel_gem_context_get_id(drm_intel_context *ctx,
 void drm_intel_gem_context_destroy(drm_intel_context *ctx);
 int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
 				  int used, unsigned int flags);
+int drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
+				drm_intel_context *ctx,
+				int used,
+				int in_fence,
+				int *out_fence,
+				unsigned int flags);
 
 int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
 drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 554d079b..9195f3e6 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -2376,6 +2376,7 @@  drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
 static int
 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+	 int in_fence, int *out_fence,
 	 unsigned int flags)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
@@ -2430,12 +2431,20 @@  do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	else
 		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
 	execbuf.rsvd2 = 0;
+	if (in_fence != -1) {
+		execbuf.rsvd2 = in_fence;
+		execbuf.flags |= I915_EXEC_FENCE_IN;
+	}
+	if (out_fence != NULL) {
+		*out_fence = -1;
+		execbuf.flags |= I915_EXEC_FENCE_OUT;
+	}
 
 	if (bufmgr_gem->no_exec)
 		goto skip_execution;
 
 	ret = drmIoctl(bufmgr_gem->fd,
-		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
+		       DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
 		       &execbuf);
 	if (ret != 0) {
 		ret = -errno;
@@ -2451,6 +2460,9 @@  do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	}
 	drm_intel_update_buffer_offsets2(bufmgr_gem);
 
+	if (ret == 0 && out_fence != NULL)
+		*out_fence = execbuf.rsvd2 >> 32;
+
 skip_execution:
 	if (bufmgr_gem->bufmgr.debug)
 		drm_intel_gem_dump_validation_list(bufmgr_gem);
@@ -2476,7 +2488,7 @@  drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
 		       int DR4)
 {
 	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
-			I915_EXEC_RENDER);
+			-1, NULL, I915_EXEC_RENDER);
 }
 
 static int
@@ -2485,14 +2497,25 @@  drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
 			unsigned int flags)
 {
 	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
-			flags);
+			-1, NULL, flags);
 }
 
 int
 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
 			      int used, unsigned int flags)
 {
-	return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
+	return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
+}
+
+int
+drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
+			    drm_intel_context *ctx,
+			    int used,
+			    int in_fence,
+			    int *out_fence,
+			    unsigned int flags)
+{
+	return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
 }
 
 static int
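
The in-fence side can be illustrated the same way (again a hypothetical
sketch, not part of the patch; the batches, contexts and lengths are assumed
to be set up as in the example above): feeding the out fence of one
submission back in as the in fence of the next serialises the two batches in
the kernel and on the GPU, with no CPU-side wait.

/* Hypothetical helper, reusing the setup from the sketch above: run
 * batch_b only after batch_a has completed, using explicit fences.
 */
static void submit_serialised(drm_intel_bo *batch_a, drm_intel_context *ctx_a,
			      int used_a,
			      drm_intel_bo *batch_b, drm_intel_context *ctx_b,
			      int used_b,
			      int *out_fence)
{
	int fence_a = -1;

	/* First batch: no in fence, ask for an out fence. */
	drm_intel_gem_bo_fence_exec(batch_a, ctx_a, used_a,
				    -1, &fence_a, I915_EXEC_RENDER);

	/* Second batch: the kernel waits on fence_a before running it. */
	drm_intel_gem_bo_fence_exec(batch_b, ctx_b, used_b,
				    fence_a, out_fence, I915_EXEC_RENDER);

	/* execbuf keeps its own reference to the in fence, so the fd can
	 * be closed as soon as it has been passed in.
	 */
	close(fence_a);
}

The returned *out_fence then signals only once batch_b, and therefore
batch_a, has completed.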