@@ -1418,7 +1418,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_execbuffer_params *params = &qe.params;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 dispatch_flags;
- int ret;
+ int ret, i;
bool need_relocs;
int fd_fence_complete = -1;
int fd_fence_wait = lower_32_bits(args->rsvd2);
@@ -1553,6 +1553,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
+ qe.saved_objects = kzalloc(
+ sizeof(*qe.saved_objects) * args->buffer_count,
+ GFP_KERNEL);
+ if (!qe.saved_objects) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/* Look up object handles */
ret = eb_lookup_vmas(eb, exec, args, vm, file);
if (ret)
@@ -1673,7 +1681,30 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->args_DR1 = args->DR1;
params->args_DR4 = args->DR4;
params->batch_obj = batch_obj;
- params->ctx = ctx;
+
+ /*
+ * Save away the list of objects used by this batch buffer for the
+ * purpose of tracking inter-buffer dependencies.
+ */
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * NB: 'drm_gem_object_lookup()' increments the object's
+ * reference count (matched by 'drm_gem_object_unreference')
+ * but may return NULL for a bad handle - TODO(review):
+ * check 'obj' before the 'pending_write_domain' read below.
+ */
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ qe.saved_objects[i].obj = obj;
+ qe.saved_objects[i].read_only = obj->base.pending_write_domain == 0;
+
+ }
+ qe.num_objs = i;
+
+ /* Take a reference on the context object and save it as well. */
+ i915_gem_context_reference(ctx);
+ params->ctx = ctx;
if (args->flags & I915_EXEC_CREATE_FENCE) {
/*
@@ -1738,6 +1769,23 @@ err:
i915_gem_context_unreference(ctx);
eb_destroy(eb);
+ if (qe.saved_objects) {
+ /* Need to release the objects: */
+ for (i = 0; i < qe.num_objs; i++) {
+ if (!qe.saved_objects[i].obj)
+ continue;
+
+ drm_gem_object_unreference(
+ &qe.saved_objects[i].obj->base);
+ }
+
+ kfree(qe.saved_objects);
+
+ /* Drop the context reference too */
+ if (params->ctx)
+ i915_gem_context_unreference(params->ctx);
+ }
+
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
@@ -158,7 +158,23 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
if (ret)
return ret;
- /* Free everything that is owned by the QE structure: */
+ /* Need to release the objects: */
+ for (i = 0; i < qe->num_objs; i++) {
+ if (!qe->saved_objects[i].obj)
+ continue;
+
+ drm_gem_object_unreference(&qe->saved_objects[i].obj->base);
+ }
+
+ kfree(qe->saved_objects);
+ qe->saved_objects = NULL;
+ qe->num_objs = 0;
+
+ /* Drop the context reference too: */
+ if (qe->params.ctx)
+ i915_gem_context_unreference(qe->params.ctx);
+
+ /* And anything else owned by the QE structure: */
kfree(qe->params.cliprects);
if (qe->params.dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_execbuff_release_batch_obj(qe->params.batch_obj);
@@ -399,7 +415,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
int flying = 0, queued = 0;
int ret = 0;
bool do_submit;
- uint32_t min_seqno;
+ uint32_t i, min_seqno;
struct list_head remove;
if (list_empty(&scheduler->node_queue[ring->id]))
@@ -503,7 +519,18 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
- /* Free everything that is owned by the node: */
+ /* Release the referenced buffers (NB: assumes non-NULL entries, unlike the other release sites): */
+ for (i = 0; i < node->num_objs; i++) {
+ drm_gem_object_unreference(
+ &node->saved_objects[i].obj->base);
+ }
+ kfree(node->saved_objects);
+
+ /* Context too: */
+ if (node->params.ctx)
+ i915_gem_context_unreference(node->params.ctx);
+
+ /* And anything else owned by the node: */
node->params.request->scheduler_qe = NULL;
i915_gem_request_unreference(node->params.request);
kfree(node->params.cliprects);