@@ -2914,7 +2914,7 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
seqno = req->ring->get_seqno(req->ring, lazy_coherency);
- return i915_seqno_passed(seqno, req->seqno);
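+ /* With execlists the seqno can pass before the final context switch
+ * confirms the request has left the ELSP, so do not report completion
+ * until elsp_submitted drops back to zero.
+ */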
+ return i915_seqno_passed(seqno, req->seqno) && !req->elsp_submitted;
}
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
@@ -2848,12 +2848,16 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
if (!list_empty(&obj->last_read_req[ring->id]->list))
break;
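+ /* The seqno may have passed while the request is still awaiting its
+ * final context switch off the ELSP; hold back the read retirement
+ * until the request is reported complete.
+ */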
+ if (!i915_gem_request_completed(obj->last_read_req[ring->id],
+ true))
+ break;
i915_gem_object_retire__read(obj, ring->id);
}
if (unlikely(ring->trace_irq_req &&
- i915_gem_request_completed(ring->trace_irq_req, true))) {
+ i915_gem_request_completed(ring->trace_irq_req,
+ true))) {
ring->irq_put(ring);
i915_gem_request_assign(&ring->trace_irq_req, NULL);
}
@@ -2872,15 +2876,6 @@ i915_gem_retire_requests(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
- if (i915.enable_execlists) {
- unsigned long flags;
-
- spin_lock_irqsave(&ring->execlist_lock, flags);
- idle &= list_empty(&ring->execlist_queue);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
-
- intel_execlists_retire_requests(ring);
- }
}
if (idle)
@@ -2956,12 +2951,14 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL)
continue;
- if (list_empty(&req->list))
- goto retire;
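+ /* The request has already left the request list, but with the
+ * stricter completion check it may still be in flight on the ELSP;
+ * only drop the object's read tracking once it truly completes.
+ */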
+ if (list_empty(&req->list)) {
+ if (i915_gem_request_completed(req, true))
+ i915_gem_object_retire__read(obj, i);
+ continue;
+ }
if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req);
-retire:
i915_gem_object_retire__read(obj, i);
}
}
@@ -1294,10 +1294,11 @@ static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir,
int test_shift)
{
- if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
- notify_ring(ring);
+ bool need_notify = false;
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
- intel_lrc_irq_handler(ring);
+ need_notify = intel_lrc_irq_handler(ring);
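+ /* A completed context switch may be what finally marks a request as
+ * complete, so wake up waiters in that case as well.
+ */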
+ if ((iir & (GT_RENDER_USER_INTERRUPT << test_shift)) || need_notify)
+ notify_ring(ring);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
@@ -426,9 +426,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
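+ /* The submission count has been transferred to the second request,
+ * so req0 can now be reported as completed.
+ */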
+ req0->elsp_submitted = 0;
list_del(&req0->execlist_link);
- list_add_tail(&req0->execlist_link,
- &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
@@ -478,11 +477,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
if (intel_execlists_ctx_id(ctx_obj) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
-
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
- list_add_tail(&head_req->execlist_link,
- &ring->execlist_retired_req_list);
return true;
}
}
@@ -497,8 +493,9 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
+ * Return: whether a context completed
*/
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+bool intel_lrc_irq_handler(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
@@ -558,6 +555,8 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
_MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
((u32)ring->next_context_status_buffer &
GEN8_CSB_PTR_MASK) << 8));
+
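+ /* submit_contexts counts the contexts removed from the ELSP above,
+ * so a non-zero value means at least one request may now be complete.
+ */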
+ return submit_contexts != 0;
}
static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -566,11 +565,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (request->ctx != ring->default_context)
- intel_lr_context_pin(request);
-
- i915_gem_request_reference(request);
-
spin_lock_irq(&ring->execlist_lock);
list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
@@ -588,8 +582,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
- list_add_tail(&tail_req->execlist_link,
- &ring->execlist_retired_req_list);
}
}
@@ -943,32 +935,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
return 0;
}
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
-{
- struct drm_i915_gem_request *req, *tmp;
- struct list_head retired_list;
-
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (list_empty(&ring->execlist_retired_req_list))
- return;
-
- INIT_LIST_HEAD(&retired_list);
- spin_lock_irq(&ring->execlist_lock);
- list_replace_init(&ring->execlist_retired_req_list, &retired_list);
- spin_unlock_irq(&ring->execlist_lock);
-
- list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
- struct intel_context *ctx = req->ctx;
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[ring->id].state;
-
- if (ctx_obj && (ctx != ring->default_context))
- intel_lr_context_unpin(req);
- list_del(&req->execlist_link);
- i915_gem_request_unreference(req);
- }
-}
-
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1924,7 +1890,6 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
- INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ret = i915_cmd_parser_init_ring(ring);
@@ -95,7 +95,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct list_head *vmas);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
+bool intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */
@@ -264,7 +264,6 @@ struct intel_engine_cs {
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
- struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);