@@ -906,21 +906,20 @@ static int wait_for_engines(struct intel_gt *gt)
}
static long
-wait_for_timelines(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
+wait_for_timelines(struct intel_gt *gt, unsigned int flags, long timeout)
{
- struct intel_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
+ mutex_lock(&timelines->mutex);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- mutex_unlock(&gt->mutex);
+ mutex_unlock(&timelines->mutex);
/*
* "Race-to-idle".
@@ -940,10 +939,10 @@ wait_for_timelines(struct drm_i915_private *i915,
return timeout;
/* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
+ mutex_lock(&timelines->mutex);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&gt->mutex);
+ mutex_unlock(&timelines->mutex);
return timeout;
}
@@ -951,16 +950,18 @@ wait_for_timelines(struct drm_i915_private *i915,
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
+ struct intel_gt *gt = &i915->gt;
+
/* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
+ if (!READ_ONCE(gt->awake))
return 0;
GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
- yesno(i915->gt.awake));
+ yesno(gt->awake));
- timeout = wait_for_timelines(i915, flags, timeout);
+ timeout = wait_for_timelines(gt, flags, timeout);
if (timeout < 0)
return timeout;
@@ -969,7 +970,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
lockdep_assert_held(&i915->drm.struct_mutex);
- err = wait_for_engines(&i915->gt);
+ err = wait_for_engines(gt);
if (err)
return err;