@@ -2238,6 +2238,8 @@ struct drm_i915_gem_request {
/** process identifier submitting this request */
struct pid *pid;
+ struct i915_scheduler_queue_entry *scheduler_qe;
+
/**
* The ELSP only accepts two elements at a time, so we queue
* context/tail pairs on a given queue (ring->execlist_queue) until the
@@ -2772,6 +2772,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *req, *req_next;
unsigned long flags;
+ bool complete;
u32 seqno;
LIST_HEAD(free_list);
@@ -2785,8 +2786,13 @@ void i915_gem_request_notify(struct intel_engine_cs *ring)
spin_lock_irqsave(&ring->fence_lock, flags);
list_for_each_entry_safe(req, req_next, &ring->fence_signal_list, signal_list) {
if (!req->cancelled) {
- if (!i915_seqno_passed(seqno, req->seqno))
- continue;
+ if (i915_scheduler_is_request_tracked(req, &complete, NULL)) {
+ if (!complete)
+ continue;
+ } else {
+ if (!i915_seqno_passed(seqno, req->seqno))
+ continue;
+ }
fence_signal_locked(&req->fence);
trace_i915_gem_request_complete(req);
@@ -2811,6 +2817,9 @@ void i915_gem_request_notify(struct intel_engine_cs *ring)
i915_gem_request_unreference(req);
}
+
+ /* fence_signal() does not wake ring->irq_queue waiters, so wake them explicitly here. */
+ wake_up_all(&ring->irq_queue);
}
static void i915_fence_timeline_value_str(struct fence *fence, char *str, int size)
@@ -119,6 +119,10 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
node->stamp = stamp;
i915_gem_request_reference(node->params.request);
+ /* A request must never be queued on the scheduler twice; warn rather
+ * than panic the whole machine on this driver-state inconsistency. */
+ WARN_ON(node->params.request->scheduler_qe);
+ node->params.request->scheduler_qe = node;
+
/* Need to determine the number of incomplete entries in the list as
* that will be the maximum size of the dependency list.
*
@@ -363,6 +366,13 @@ static void i915_scheduler_seqno_complete(struct intel_engine_cs *ring, uint32_t
got_changes = true;
}
+ /*
+ * The completion interrupt may fire before the scheduler marks a
+ * request complete; re-run notify so it is still signalled.
+ */
+ if (got_changes)
+ i915_gem_request_notify(ring);
+
/* Should submit new work here if flight list is empty but the DRM
* mutex lock might not be available if a '__wait_request()' call is
* blocking the system. */
@@ -504,6 +514,7 @@ int i915_scheduler_remove(struct intel_engine_cs *ring)
i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
/* Free everything that is owned by the node: */
+ node->params.request->scheduler_qe = NULL;
i915_gem_request_unreference(node->params.request);
kfree(node->params.cliprects);
kfree(node->dep_list);
@@ -774,3 +785,32 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *scheduler,
return 0;
}
+
+/**
+ * i915_scheduler_is_request_tracked - is a request under scheduler control?
+ * @req: the request to look up
+ * @completed: optional out parameter; set true if the request has completed
+ * @busy: optional out parameter; set true if the request is still queued
+ *
+ * Returns true if the scheduler is active and holds a queue entry for the
+ * given request. The out parameters are written only when true is returned.
+ */
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+ bool *completed, bool *busy)
+{
+ struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+ if (!scheduler)
+ return false;
+
+ if (req->scheduler_qe == NULL)
+ return false;
+
+ if (completed)
+ *completed = I915_SQS_IS_COMPLETE(req->scheduler_qe);
+ if (busy)
+ *busy = I915_SQS_IS_QUEUED(req->scheduler_qe);
+
+ return true;
+}
@@ -87,5 +87,7 @@ enum {
int i915_scheduler_init(struct drm_device *dev);
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
int i915_scheduler_handle_irq(struct intel_engine_cs *ring);
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+ bool *completed, bool *busy);
#endif /* _I915_SCHEDULER_H_ */