Message ID | 20191118230254.2615942-13-chris@chris-wilson.co.uk (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [01/19] drm/i915/selftests: Force bonded submission to overlap | expand |
On 18/11/2019 23:02, Chris Wilson wrote: > Now that we never allow the intel_wakeref callbacks to be invoked from > interrupt context, we do not need the irqsafe spinlock for the timeline. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > --- > drivers/gpu/drm/i915/gt/intel_gt_requests.c | 9 ++++----- > drivers/gpu/drm/i915/gt/intel_reset.c | 9 ++++----- > drivers/gpu/drm/i915/gt/intel_timeline.c | 10 ++++------ > 3 files changed, 12 insertions(+), 16 deletions(-) > > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c > index 7559d6373f49..74356db43325 100644 > --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c > +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c > @@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) > { > struct intel_gt_timelines *timelines = &gt->timelines; > struct intel_timeline *tl, *tn; > - unsigned long flags; > bool interruptible; > LIST_HEAD(free); > > @@ -43,7 +42,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) > > flush_submission(gt); /* kick the ksoftirqd tasklets */ > > - spin_lock_irqsave(&timelines->lock, flags); > + spin_lock(&timelines->lock); > list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { > if (!mutex_trylock(&tl->mutex)) > continue; > @@ -51,7 +50,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) > intel_timeline_get(tl); > GEM_BUG_ON(!atomic_read(&tl->active_count)); > atomic_inc(&tl->active_count); /* pin the list element */ > - spin_unlock_irqrestore(&timelines->lock, flags); > + spin_unlock(&timelines->lock); > > if (timeout > 0) { > struct dma_fence *fence; > @@ -67,7 +66,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) > > retire_requests(tl); > > - spin_lock_irqsave(&timelines->lock, flags); > + spin_lock(&timelines->lock); > > /* Resume iteration after dropping lock */ > list_safe_reset_next(tl, tn, link); > @@ -82,7
+81,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) > list_add(&tl->link, &free); > } > } > - spin_unlock_irqrestore(&timelines->lock, flags); > + spin_unlock(&timelines->lock); > > list_for_each_entry_safe(tl, tn, &free, link) > __intel_timeline_free(&tl->kref); > diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c > index 0388f9375366..36189238e13c 100644 > --- a/drivers/gpu/drm/i915/gt/intel_reset.c > +++ b/drivers/gpu/drm/i915/gt/intel_reset.c > @@ -831,7 +831,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) > { > struct intel_gt_timelines *timelines = &gt->timelines; > struct intel_timeline *tl; > - unsigned long flags; > bool ok; > > if (!test_bit(I915_WEDGED, &gt->reset.flags)) > @@ -853,7 +852,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) > * > * No more can be submitted until we reset the wedged bit. > */ > - spin_lock_irqsave(&timelines->lock, flags); > + spin_lock(&timelines->lock); > list_for_each_entry(tl, &timelines->active_list, link) { > struct dma_fence *fence; > > @@ -861,7 +860,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) > if (!fence) > continue; > > - spin_unlock_irqrestore(&timelines->lock, flags); > + spin_unlock(&timelines->lock); > > /* > * All internal dependencies (i915_requests) will have > @@ -874,10 +873,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) > dma_fence_put(fence); > > /* Restart iteration after dropping lock */ > - spin_lock_irqsave(&timelines->lock, flags); > + spin_lock(&timelines->lock); > tl = list_entry(&timelines->active_list, typeof(*tl), link); > } > - spin_unlock_irqrestore(&timelines->lock, flags); > + spin_unlock(&timelines->lock); > > /* We must reset pending GPU events before restoring our submission */ > ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ > diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c > index
4f914f0d5eab..bd973d950064 100644 > --- a/drivers/gpu/drm/i915/gt/intel_timeline.c > +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c > @@ -332,7 +332,6 @@ int intel_timeline_pin(struct intel_timeline *tl) > void intel_timeline_enter(struct intel_timeline *tl) > { > struct intel_gt_timelines *timelines = &tl->gt->timelines; > - unsigned long flags; > > /* > * Pretend we are serialised by the timeline->mutex. > @@ -358,16 +357,15 @@ void intel_timeline_enter(struct intel_timeline *tl) > if (atomic_add_unless(&tl->active_count, 1, 0)) > return; > > - spin_lock_irqsave(&timelines->lock, flags); > + spin_lock(&timelines->lock); > if (!atomic_fetch_inc(&tl->active_count)) > list_add(&tl->link, &timelines->active_list); > - spin_unlock_irqrestore(&timelines->lock, flags); > + spin_unlock(&timelines->lock); > } > > void intel_timeline_exit(struct intel_timeline *tl) > { > struct intel_gt_timelines *timelines = &tl->gt->timelines; > - unsigned long flags; > > /* See intel_timeline_enter() */ > lockdep_assert_held(&tl->mutex); > @@ -376,10 +374,10 @@ void intel_timeline_exit(struct intel_timeline *tl) > if (atomic_add_unless(&tl->active_count, -1, 1)) > return; > > - spin_lock_irqsave(&timelines->lock, flags); > + spin_lock(&timelines->lock); > if (atomic_dec_and_test(&tl->active_count)) > list_del(&tl->link); > - spin_unlock_irqrestore(&timelines->lock, flags); > + spin_unlock(&timelines->lock); > > /* > * Since this timeline is idle, all barriers upon which we were waiting > Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Regards, Tvrtko
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 7559d6373f49..74356db43325 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -33,7 +33,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) { struct intel_gt_timelines *timelines = &gt->timelines; struct intel_timeline *tl, *tn; - unsigned long flags; bool interruptible; LIST_HEAD(free); @@ -43,7 +42,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) flush_submission(gt); /* kick the ksoftirqd tasklets */ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) continue; @@ -51,7 +50,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) intel_timeline_get(tl); GEM_BUG_ON(!atomic_read(&tl->active_count)); atomic_inc(&tl->active_count); /* pin the list element */ - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); if (timeout > 0) { struct dma_fence *fence; @@ -67,7 +66,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) retire_requests(tl); - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); /* Resume iteration after dropping lock */ list_safe_reset_next(tl, tn, link); @@ -82,7 +81,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) list_add(&tl->link, &free); } } - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 0388f9375366..36189238e13c 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -831,7 +831,6 @@ static bool __intel_gt_unset_wedged(struct
intel_gt *gt) { struct intel_gt_timelines *timelines = &gt->timelines; struct intel_timeline *tl; - unsigned long flags; bool ok; if (!test_bit(I915_WEDGED, &gt->reset.flags)) @@ -853,7 +852,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) * * No more can be submitted until we reset the wedged bit. */ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); list_for_each_entry(tl, &timelines->active_list, link) { struct dma_fence *fence; @@ -861,7 +860,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) if (!fence) continue; - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); /* * All internal dependencies (i915_requests) will have @@ -874,10 +873,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) dma_fence_put(fence); /* Restart iteration after dropping lock */ - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); tl = list_entry(&timelines->active_list, typeof(*tl), link); } - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); /* We must reset pending GPU events before restoring our submission */ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 4f914f0d5eab..bd973d950064 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -332,7 +332,6 @@ int intel_timeline_pin(struct intel_timeline *tl) void intel_timeline_enter(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; - unsigned long flags; /* * Pretend we are serialised by the timeline->mutex.
@@ -358,16 +357,15 @@ void intel_timeline_enter(struct intel_timeline *tl) if (atomic_add_unless(&tl->active_count, 1, 0)) return; - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); if (!atomic_fetch_inc(&tl->active_count)) list_add(&tl->link, &timelines->active_list); - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); } void intel_timeline_exit(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; - unsigned long flags; /* See intel_timeline_enter() */ lockdep_assert_held(&tl->mutex); @@ -376,10 +374,10 @@ void intel_timeline_exit(struct intel_timeline *tl) if (atomic_add_unless(&tl->active_count, -1, 1)) return; - spin_lock_irqsave(&timelines->lock, flags); + spin_lock(&timelines->lock); if (atomic_dec_and_test(&tl->active_count)) list_del(&tl->link); - spin_unlock_irqrestore(&timelines->lock, flags); + spin_unlock(&timelines->lock); /* * Since this timeline is idle, all barriers upon which we were waiting
Now that we never allow the intel_wakeref callbacks to be invoked from interrupt context, we do not need the irqsafe spinlock for the timeline. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 9 ++++----- drivers/gpu/drm/i915/gt/intel_reset.c | 9 ++++----- drivers/gpu/drm/i915/gt/intel_timeline.c | 10 ++++------ 3 files changed, 12 insertions(+), 16 deletions(-)