diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -40,7 +40,7 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gt_timelines {
- struct mutex mutex; /* protects list */
+ spinlock_t lock; /* protects active_list */
struct list_head active_list;
/* Pack multiple timelines' seqnos into the same page */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -811,7 +811,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*
* No more can be submitted until we reset the wedged bit.
*/
- mutex_lock(&timelines->mutex);
+ spin_lock(&timelines->lock);
list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
@@ -819,6 +819,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (!rq)
continue;
+ spin_unlock(&timelines->lock);
+
/*
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
@@ -828,8 +830,12 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
+
+ /* Restart iteration after dropping lock */
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&timelines->mutex);
+ spin_unlock(&timelines->lock);
intel_gt_sanitize(gt, false);
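The hunk above leans on a drop-and-restart idiom: dma_fence_default_wait() may
sleep, so the spinlock must be released around the wait, and the list walk
restarted from the head afterwards because the current node may have been
unlinked (or freed) while the lock was dropped. A minimal, self-contained
sketch of the idiom follows; the names (item, grab_work, wait_on_work,
put_work) are hypothetical stand-ins, not code from this patch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
};

static LIST_HEAD(active);		/* stand-in for the active list */
static DEFINE_SPINLOCK(active_lock);	/* stand-in for timelines->lock */

/* Hypothetical helpers: take a reference that outlives the lock. */
void *grab_work(struct item *it);
void wait_on_work(void *work);		/* may sleep */
void put_work(void *work);

static void drain_active(void)
{
	struct item *it;

	spin_lock(&active_lock);
	list_for_each_entry(it, &active, link) {
		/* Reference taken under the lock keeps 'work' alive. */
		void *work = grab_work(it);

		if (!work)
			continue;

		/* Sleeping under a spinlock is illegal: drop it first. */
		spin_unlock(&active_lock);
		wait_on_work(work);
		put_work(work);
		spin_lock(&active_lock);

		/*
		 * 'it' may be stale now. Pointing it at the list head
		 * makes list_for_each_entry() advance via head->next,
		 * i.e. the walk restarts from the first element.
		 */
		it = list_entry(&active, typeof(*it), link);
	}
	spin_unlock(&active_lock);
}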
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -266,7 +266,7 @@ static void timelines_init(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
- mutex_init(&timelines->mutex);
+ spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list);
spin_lock_init(&timelines->hwsp_lock);
@@ -345,9 +345,9 @@ void intel_timeline_enter(struct intel_timeline *tl)
return;
GEM_BUG_ON(!tl->active_count); /* overflow? */
- mutex_lock(&timelines->mutex);
+ spin_lock(&timelines->lock);
list_add(&tl->link, &timelines->active_list);
- mutex_unlock(&timelines->mutex);
+ spin_unlock(&timelines->lock);
}
void intel_timeline_exit(struct intel_timeline *tl)
@@ -358,9 +358,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
if (--tl->active_count)
return;
- mutex_lock(&timelines->mutex);
+ spin_lock(&timelines->lock);
list_del(&tl->link);
- mutex_unlock(&timelines->mutex);
+ spin_unlock(&timelines->lock);
/*
* Since this timeline is idle, all barriers upon which we were waiting
@@ -548,8 +548,6 @@ static void timelines_fini(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->active_list));
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
-
- mutex_destroy(&timelines->mutex);
}
void intel_timelines_fini(struct drm_i915_private *i915)
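Note that the mutex_destroy() call is dropped without a replacement: the
kernel has no spin_lock_destroy(), as an initialized spinlock_t needs no
teardown.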
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
@@ -897,18 +897,18 @@ static long
wait_for_timelines(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- struct intel_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl;
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
+ spin_lock(&timelines->lock);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- mutex_unlock(&gt->mutex);
+ spin_unlock(&timelines->lock);
/*
* "Race-to-idle".
@@ -928,10 +928,10 @@ wait_for_timelines(struct drm_i915_private *i915,
return timeout;
/* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&gt->mutex);
+ spin_unlock(&timelines->lock);
return timeout;
}
Convert the list manipulation of active to use spinlocks so that we
can perform the updates from underneath a quick interrupt callback.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c    | 10 ++++++++--
 drivers/gpu/drm/i915/gt/intel_timeline.c | 12 +++++-------
 drivers/gpu/drm/i915/i915_gem.c          | 14 +++++++-------
 4 files changed, 21 insertions(+), 17 deletions(-)
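To make the motivation concrete: mutex_lock() may sleep and is therefore
forbidden in atomic context, while spin_lock() is not. A hypothetical sketch
(not from this series) of an interrupt-driven caller, assuming the retirement
callback runs as an old-style tasklet and that active_list/active_lock stand
in for gt->timelines:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(active_list);		/* stand-in for timelines->active_list */
static DEFINE_SPINLOCK(active_lock);	/* stand-in for timelines->lock */

struct tl {
	struct list_head link;
};

/*
 * Scheduled via tasklet_schedule() from an interrupt handler. Tasklets
 * run in softirq (atomic) context: a mutex_lock() here would oops with
 * "BUG: sleeping function called from invalid context", whereas
 * spin_lock() is legal. Caveat: if process-context paths share a lock
 * with softirq users, they must take it with spin_lock_bh() to avoid a
 * same-CPU deadlock; the patch uses plain spin_lock(), which assumes
 * its callers' contexts make that safe.
 */
static void retire_cb(unsigned long data)
{
	struct tl *timeline = (struct tl *)data;

	spin_lock(&active_lock);
	list_del(&timeline->link);	/* list update from atomic context */
	spin_unlock(&active_lock);
}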