@@ -2490,6 +2490,8 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
ring->semaphore.sync_seqno[j] = 0;
+
+ ring->last_irq_seqno = 0;
}
return 0;
@@ -2821,11 +2823,21 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
return;
}
- if (!fence_locked)
- spin_lock_irqsave(&ring->fence_lock, flags);
-
+ /*
+ * Check for a new seqno. If it hasn't actually changed then exit
+ * early without even grabbing the spinlock. Note that this is safe
+ * because a racy update of last_irq_seqno merely results in doing
+ * the full processing when there is potentially no work to be done.
+ * It can never lead to not processing work that does need to happen.
+ */
seqno = ring->get_seqno(ring, false);
trace_i915_gem_request_notify(ring, seqno);
+ if (seqno == ring->last_irq_seqno)
+ return;
+ ring->last_irq_seqno = seqno;
+
+ if (!fence_locked)
+ spin_lock_irqsave(&ring->fence_lock, flags);
list_for_each_entry_safe(req, req_next, &ring->fence_signal_list, signal_link) {
if (!req->cancelled) {
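
The hunk above amounts to a lockless seqno cache sitting in front of the signal-list processing. As a minimal user-space sketch of the same pattern, with hypothetical names (engine, process_signal_list, hw_seqno) and a pthread mutex standing in for fence_lock:

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>

struct engine {
    pthread_mutex_t fence_lock;
    uint32_t last_irq_seqno;    /* last seqno seen by the notify path */
    volatile uint32_t hw_seqno; /* stands in for ring->get_seqno() */
};

/* Walk the signal list and complete requests at or below seqno. */
static void process_signal_list(struct engine *e, uint32_t seqno)
{
    (void)e;
    (void)seqno;
}

void engine_notify(struct engine *e, bool fence_locked)
{
    uint32_t seqno = e->hw_seqno;

    /*
     * Early exit: if the seqno has not moved since the last notify,
     * no request can have completed, so skip the lock entirely. A
     * stale read here only causes one redundant full pass; it can
     * never skip work that genuinely needs doing.
     */
    if (seqno == e->last_irq_seqno)
        return;
    e->last_irq_seqno = seqno;

    if (!fence_locked)
        pthread_mutex_lock(&e->fence_lock);

    process_signal_list(e, seqno);

    if (!fence_locked)
        pthread_mutex_unlock(&e->fence_lock);
}

Note that the cache is updated before the lock is taken, so when two notifies race, at worst one of them does a full pass over an already-drained list.
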
@@ -3120,7 +3132,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* Tidy up anything left over. This includes a call to
* i915_gem_request_notify() which will make sure that any requests
* that were on the signal pending list also get cleaned up.
+ * NB: The seqno cache must be cleared first, otherwise the notify
+ * call will simply return immediately.
*/
+ ring->last_irq_seqno = 0;
i915_gem_retire_requests_ring(ring);
/* Having flushed all requests from all queues, we know that all
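
Continuing the sketch above, the reset path has to poison the cache before the final notify, otherwise the early exit would swallow the cleanup pass. A hedged continuation, assuming the hypothetical engine_notify() from the previous sketch and that 0 is never a valid live seqno:

/*
 * GPU-reset cleanup, continuing the sketch above. After a reset the
 * hardware seqno may be unchanged, so the cached value can still
 * equal the current one even though cancelled requests remain on the
 * signal list. Zeroing the cache forces the next engine_notify() to
 * take the full processing path.
 */
void engine_reset_cleanup(struct engine *e)
{
    e->last_irq_seqno = 0;
    engine_notify(e, false);
}
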
@@ -359,6 +359,7 @@ struct intel_engine_cs {
spinlock_t fence_lock;
struct list_head fence_signal_list;
struct list_head fence_unsignal_list;
+ uint32_t last_irq_seqno;
};
bool intel_ring_initialized(struct intel_engine_cs *ring);