diff mbox

[RFC,21/44] drm/i915: Added tracking/locking of batch buffer objects

Message ID 1403803475-16337-22-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

John Harrison June 26, 2014, 5:24 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The scheduler needs to track interdependencies between batch buffers. These are
calculated by analysing each buffer's object list and looking for objects that
the buffers have in common. The scheduler also needs to keep those buffers
locked long after the initial IOCTL call has returned to user space.
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   57 +++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_scheduler.c      |   20 +++++++++-
 drivers/gpu/drm/i915/i915_scheduler.h      |    6 +++
 3 files changed, 80 insertions(+), 3 deletions(-)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f73c936..6bb1fd6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1094,6 +1094,9 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	int ret, mode;
 	bool need_relocs;
 	struct i915_scheduler_queue_entry qe;
+#ifdef CONFIG_DRM_I915_SCHEDULER
+	int i;
+#endif
 
 	if (!i915_gem_check_execbuffer(args))
 		return -EINVAL;
@@ -1250,6 +1253,16 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+#ifdef CONFIG_DRM_I915_SCHEDULER
+	qe.saved_objects = kzalloc(
+			sizeof(*qe.saved_objects) * args->buffer_count,
+			GFP_KERNEL);
+	if (!qe.saved_objects) {
+		ret = -ENOMEM;
+		goto err;
+	}
+#endif
+
 	/* Look up object handles */
 	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
@@ -1333,10 +1346,33 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	qe.params.args_DR4                = args->DR4;
 	qe.params.batch_obj               = batch_obj;
 	qe.params.cliprects               = cliprects;
-	qe.params.ctx                     = ctx;
 	qe.params.mask                    = mask;
 	qe.params.mode                    = mode;
 
+#ifdef CONFIG_DRM_I915_SCHEDULER
+	/*
+	 * Save away the list of objects used by this batch buffer for the
+	 * purpose of tracking inter-buffer dependencies.
+	 */
+	for (i = 0; i < args->buffer_count; i++) {
+		/*
+		 * NB: 'drm_gem_object_lookup()' increments the object's
+		 * reference count and so must be matched by a
+		 * 'drm_gem_object_unreference' call.
+		 */
+		qe.saved_objects[i].obj =
+			to_intel_bo(drm_gem_object_lookup(dev, file,
+							  exec[i].handle));
+	}
+	qe.num_objs = i;
+
+	/* Lock and save the context object as well. */
+	i915_gem_context_reference(ctx);
+	qe.params.ctx = ctx;
+#else  // CONFIG_DRM_I915_SCHEDULER
+	qe.params.ctx = ctx;
+#endif // CONFIG_DRM_I915_SCHEDULER
+
 	if (flags & I915_DISPATCH_SECURE)
 		qe.params.batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
 	else
@@ -1370,6 +1406,25 @@  err:
 
 	eb_destroy(eb);
 
+#ifdef CONFIG_DRM_I915_SCHEDULER
+	if (qe.saved_objects) {
+		/* Need to release the objects: */
+		for (i = 0; i < qe.num_objs; i++) {
+			if (!qe.saved_objects[i].obj)
+				continue;
+
+			drm_gem_object_unreference(
+					&qe.saved_objects[i].obj->base);
+		}
+
+		kfree(qe.saved_objects);
+
+		/* Context too */
+		if (qe.params.ctx)
+			i915_gem_context_unreference(qe.params.ctx);
+	}
+#endif // CONFIG_DRM_I915_SCHEDULER
+
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d95c789..fc165c2 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -62,7 +62,7 @@  int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 {
 	struct drm_i915_private     *dev_priv = qe->params.dev->dev_private;
 	struct i915_scheduler       *scheduler = dev_priv->scheduler;
-	int ret;
+	int ret, i;
 
 	BUG_ON(!scheduler);
 
@@ -70,7 +70,23 @@  int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 
 	ret = i915_gem_do_execbuffer_final(&qe->params);
 
-	/* Free everything that is owned by the QE structure: */
+	/* Need to release the objects: */
+	for (i = 0; i < qe->num_objs; i++) {
+		if (!qe->saved_objects[i].obj)
+			continue;
+
+		drm_gem_object_unreference(&qe->saved_objects[i].obj->base);
+	}
+
+	kfree(qe->saved_objects);
+	qe->saved_objects = NULL;
+	qe->num_objs = 0;
+
+	/* Free the context object too: */
+	if (qe->params.ctx)
+		i915_gem_context_unreference(qe->params.ctx);
+
+	/* And anything else owned by the QE structure: */
 	kfree(qe->params.cliprects);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 4c3e081..7c88a26 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -45,8 +45,14 @@  struct i915_execbuffer_params {
 	uint32_t                        scheduler_index;
 };
 
+struct i915_scheduler_obj_entry {
+	struct drm_i915_gem_object          *obj;
+};
+
 struct i915_scheduler_queue_entry {
 	struct i915_execbuffer_params       params;
+	struct i915_scheduler_obj_entry     *saved_objects;
+	int                                 num_objs;
 };
 
 bool        i915_scheduler_is_enabled(struct drm_device *dev);