@@ -2477,7 +2477,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 
 	/* Carefully retire all requests without writing to the rings */
 	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_idle(ring);
+		ret = intel_ring_idle(ring, false);
 		if (ret)
 			return ret;
 	}
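This caller passes false: judging by the comment added to intel_ring_idle() below, i915_gem_init_seqno() sits on the seqno-wrap path ('get_seqno' calling 'wrap_seqno' calling 'idle') and so can be reached from inside execbuff submission, where the scheduler must not be flushed. The other callers converted by this patch are external quiesce points and pass true; a sketch of both patterns follows the intel_ring_idle() hunk.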
@@ -3788,7 +3788,7 @@ int i915_gpu_idle(struct drm_device *dev)
 			i915_add_request_no_flush(req);
 		}
 
-		ret = intel_ring_idle(ring);
+		ret = intel_ring_idle(ring, true);
 		if (ret)
			return ret;
 	}
@@ -1014,7 +1014,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 	if (!intel_ring_initialized(ring))
 		return;
 
-	ret = intel_ring_idle(ring);
+	ret = intel_ring_idle(ring, true);
 	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
@@ -2278,9 +2278,22 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 	intel_ring_update_space(ringbuf);
 }
 
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_ring_idle(struct intel_engine_cs *ring, bool flush)
 {
 	struct drm_i915_gem_request *req;
+	int ret;
+
+	/*
+	 * NB: Must not flush the scheduler if this idle request is from
+	 * within an execbuff submission (i.e. due to 'get_seqno' calling
+	 * 'wrap_seqno' calling 'idle'), as that would lead to recursive
+	 * flushes!
+	 */
+	if (flush) {
+		ret = i915_scheduler_flush(ring, true);
+		if (ret)
+			return ret;
+	}
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
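To make the new convention concrete, here is a minimal sketch of the two caller patterns, assuming the call chain described in the comment above. The wrapper names are hypothetical; only intel_ring_idle() and i915_scheduler_flush() come from this patch.

/*
 * Sketch only -- hypothetical helpers, not driver code. The rule is
 * flush == true everywhere except the seqno-wrap path, which already
 * runs inside execbuff submission.
 */

/* External quiesce point: flush the scheduler so queued work reaches
 * the hardware, then wait for the ring to drain. */
static int example_quiesce(struct intel_engine_cs *ring)
{
	return intel_ring_idle(ring, true);
}

/* Seqno-wrap path: a scheduler flush here would recurse
 * ('get_seqno' -> 'wrap_seqno' -> 'idle' -> flush -> submission ->
 * 'get_seqno' ...), so wait only, never flush. */
static int example_idle_during_submission(struct intel_engine_cs *ring)
{
	return intel_ring_idle(ring, false);
}

Since intel_ring_idle() is declared __must_check (see the header hunk below), callers still have to handle the error that i915_scheduler_flush() can now propagate through it.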
@@ -3085,7 +3098,7 @@ intel_stop_ring_buffer(struct intel_engine_cs *ring)
 	if (!intel_ring_initialized(ring))
 		return;
 
-	ret = intel_ring_idle(ring);
+	ret = intel_ring_idle(ring, true);
 	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
@@ -488,7 +488,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *ring);
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
+int __must_check intel_ring_idle(struct intel_engine_cs *ring, bool flush);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);