
drm/i915/gt: Flush other retirees inside intel_gt_retire_requests()

Message ID 20191223211008.2371613-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series drm/i915/gt: Flush other retirees inside intel_gt_retire_requests()

Commit Message

Chris Wilson Dec. 23, 2019, 9:10 p.m. UTC
Our goal in wait_for_idle (intel_gt_retire_requests) is to wait for the
current workload *and* its idle barriers. This requires us to notice the
late arrival of those barriers, which is done by inspecting the list of
active timelines. However, if a concurrent retirer is running, that new
timeline may not be added until after we drop the lock -- so flush
concurrent retirers before we take the lock and inspect the list.
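
The race is generic: a waiter that inspects a shared list under a lock can
still miss an entry that a concurrent worker is about to add, unless the
waiter flushes that worker before taking the lock. A minimal userspace
analogy of the fix (pthreads instead of kernel work items, made-up names;
not the driver code itself):

/* build with: cc -pthread analogy.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active_entries; /* stands in for timelines->active_list */
static pthread_t retiree;

static void *concurrent_retiree(void *arg)
{
	/* A concurrent retirer adds a late entry (think: idle barrier). */
	(void)arg;
	pthread_mutex_lock(&lock);
	active_entries++;
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void flush_retirees(void)
{
	/* Counterpart of flush_work(): wait for the worker to finish. */
	pthread_join(retiree, NULL);
}

int main(void)
{
	pthread_create(&retiree, NULL, concurrent_retiree, NULL);

	/*
	 * Without this flush, the scan below can run before the worker
	 * has added its entry and wrongly report the list as idle.
	 */
	flush_retirees();

	pthread_mutex_lock(&lock);
	bool idle = !active_entries;
	pthread_mutex_unlock(&lock);

	printf("idle = %s\n", idle ? "true" : "false");
	return 0;
}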

Closes: https://gitlab.freedesktop.org/drm/intel/issues/878
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_gt_requests.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 9e75fa1b6bc1..fc691c130ba6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -26,21 +26,18 @@  static bool retire_requests(struct intel_timeline *tl)
 	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static bool flush_submission(struct intel_gt *gt)
+static void flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	bool active = false;
 
 	if (!intel_gt_pm_is_awake(gt))
-		return false;
+		return;
 
 	for_each_engine(engine, gt, id) {
-		active |= intel_engine_flush_submission(engine);
-		active |= flush_work(&engine->retire_work);
+		intel_engine_flush_submission(engine);
+		flush_work(&engine->retire_work);
 	}
-
-	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -126,7 +123,6 @@  long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		timeout = -timeout, interruptible = false;
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
@@ -153,6 +149,7 @@  long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		active_count += !retire_requests(tl);
 
+		flush_submission(gt); /* sync with concurrent retirees */
 		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
@@ -173,9 +170,6 @@  long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	if (flush_submission(gt))
-		active_count++;
-
 	return active_count ? timeout : 0;
 }
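
The return convention is unchanged by this patch: a non-zero return means
requests were still active within the allotted timeout, so callers poll
until it reaches zero. A hedged sketch of that usage (intel_gt_wait_for_idle
in the same file follows this shape; the details here are illustrative, not
a copy of the driver code):

static int wait_for_idle_sketch(struct intel_gt *gt, long timeout)
{
	/* Keep retiring until nothing remains active or time runs out. */
	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR; /* allow the wait to be interrupted */
	}

	return timeout; /* 0 once idle */
}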