@@ -1402,6 +1402,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
fence_signal_locked(&request->fence);
}
+ if (request->scheduler_qe)
+ i915_gem_scheduler_clean_node(request->scheduler_qe);
+
i915_gem_request_unreference(request);
}
@@ -406,6 +406,41 @@ void i915_scheduler_wakeup(struct drm_device *dev)
queue_work(dev_priv->wq, &dev_priv->mm.scheduler_work);
}
+void i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
+{
+ uint32_t i;
+
+ if (!I915_SQS_IS_COMPLETE(node)) {
+ WARN(!node->params.request->cancelled,
+ "Cleaning active node: %d!\n", node->status);
+ return;
+ }
+
+ if (node->params.batch_obj) {
+		/* The batch buffer must be unpinned before it is unreferenced,
+		 * otherwise the unpin fails with a missing vma (reason unclear). */
+ if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
+ i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
+
+ node->params.batch_obj = NULL;
+ }
+
+ /* Release the locked buffers: */
+ for (i = 0; i < node->num_objs; i++) {
+ drm_gem_object_unreference(
+ &node->saved_objects[i].obj->base);
+ }
+ kfree(node->saved_objects);
+ node->saved_objects = NULL;
+ node->num_objs = 0;
+
+ /* Context too: */
+ if (node->params.ctx) {
+ i915_gem_context_unreference(node->params.ctx);
+ node->params.ctx = NULL;
+ }
+}
+
static int i915_scheduler_remove(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -415,7 +450,7 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
int flying = 0, queued = 0;
int ret = 0;
bool do_submit;
- uint32_t i, min_seqno;
+ uint32_t min_seqno;
struct list_head remove;
if (list_empty(&scheduler->node_queue[ring->id]))
@@ -514,21 +549,8 @@ static int i915_scheduler_remove(struct intel_engine_cs *ring)
node = list_first_entry(&remove, typeof(*node), link);
list_del(&node->link);
- /* The batch buffer must be unpinned before it is unreferenced
- * otherwise the unpin fails with a missing vma!? */
- if (node->params.dispatch_flags & I915_DISPATCH_SECURE)
- i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
-
- /* Release the locked buffers: */
- for (i = 0; i < node->num_objs; i++) {
- drm_gem_object_unreference(
- &node->saved_objects[i].obj->base);
- }
- kfree(node->saved_objects);
-
- /* Context too: */
- if (node->params.ctx)
- i915_gem_context_unreference(node->params.ctx);
+ /* Free up all the DRM object references */
+ i915_gem_scheduler_clean_node(node);
/* And anything else owned by the node: */
node->params.request->scheduler_qe = NULL;
@@ -88,6 +88,7 @@ bool i915_scheduler_is_enabled(struct drm_device *dev);
int i915_scheduler_init(struct drm_device *dev);
int i915_scheduler_closefile(struct drm_device *dev,
struct drm_file *file);
+void i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);