@@ -1386,6 +1386,7 @@ out:
* request has not actually been fully processed yet.
*/
spin_lock_irq(&req->ring->fence_lock);
+ req->ring->last_irq_seqno = 0;
i915_gem_request_notify(req->ring, true);
spin_unlock_irq(&req->ring->fence_lock);
}
@@ -2543,6 +2544,8 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
ring->semaphore.sync_seqno[j] = 0;
+
+ ring->last_irq_seqno = 0;
}
return 0;
@@ -2875,11 +2878,22 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
return;
}
+ /*
+ * Check for a new seqno. If it hasn't actually changed, then exit
+ * early without even grabbing the spinlock. Note that this is safe
+ * because any corruption of last_irq_seqno merely results in doing
+ * the full processing when there is potentially no work to be done.
+ * It can never lead to not processing work that does need to happen.
+ */
+ seqno = ring->get_seqno(ring, false);
+ trace_i915_gem_request_notify(ring, seqno);
+ if (seqno == ring->last_irq_seqno)
+ return;
+
if (!fence_locked)
spin_lock_irqsave(&ring->fence_lock, flags);
- seqno = ring->get_seqno(ring, false);
- trace_i915_gem_request_notify(ring, seqno);
+ ring->last_irq_seqno = seqno;
list_for_each_entry_safe(req, req_next, &ring->fence_signal_list, signal_link) {
if (!req->cancelled) {
@@ -3167,7 +3181,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* Tidy up anything left over. This includes a call to
* i915_gem_request_notify() which will make sure that any requests
* that were on the signal pending list get also cleaned up.
+ * NB: The seqno cache must be cleared first, otherwise the notify
+ * call's early-exit check will simply return immediately.
*/
+ ring->last_irq_seqno = 0;
i915_gem_retire_requests_ring(ring);
/* Having flushed all requests from all queues, we know that all
@@ -363,6 +363,7 @@ struct intel_engine_cs {
spinlock_t fence_lock;
struct list_head fence_signal_list;
struct list_head fence_unsignal_list;
+ uint32_t last_irq_seqno;
};
static inline bool