Message ID | 20230308094106.203686-2-andi.shyti@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Fix error propagation amongst request | expand |
On 08.03.2023 10:41, Andi Shyti wrote: > From: Chris Wilson <chris@chris-wilson.co.uk> > > Before taking exclusive ownership of the ring for emitting the request, > wait for space in the ring to become available. This allows others to > take the timeline->mutex to make forward progresses while userspace is > blocked. > > In particular, this allows regular clients to issue requests on the > kernel context, potentially filling the ring, but allow the higher > priority heartbeats and pulses to still be submitted without being > blocked by the less critical work. > > Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com> > Cc: Maciej Patelczyk <maciej.patelczyk@intel.com> > Cc: stable@vger.kernel.org > Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com> Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com> Regards Andrzej > --- > drivers/gpu/drm/i915/gt/intel_context.c | 41 +++++++++++++++++++++++++ > drivers/gpu/drm/i915/gt/intel_context.h | 2 ++ > drivers/gpu/drm/i915/i915_request.c | 3 ++ > 3 files changed, 46 insertions(+) > > diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c > index 2aa63ec521b89..59cd612a23561 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.c > +++ b/drivers/gpu/drm/i915/gt/intel_context.c > @@ -626,6 +626,47 @@ bool intel_context_revoke(struct intel_context *ce) > return ret; > } > > +int intel_context_throttle(const struct intel_context *ce) > +{ > + const struct intel_ring *ring = ce->ring; > + const struct intel_timeline *tl = ce->timeline; > + struct i915_request *rq; > + int err = 0; > + > + if (READ_ONCE(ring->space) >= SZ_1K) > + return 0; > + > + rcu_read_lock(); > + list_for_each_entry_reverse(rq, &tl->requests, link) { > + if (__i915_request_is_complete(rq)) > + break; > + > + if (rq->ring != ring) > + continue; > + > + /* Wait until there will be enough space following that rq */ > + if (__intel_ring_space(rq->postfix, > + ring->emit, > + ring->size) < ring->size / 2) { > + if (i915_request_get_rcu(rq)) { > + rcu_read_unlock(); > > + if (i915_request_wait(rq, > + I915_WAIT_INTERRUPTIBLE, > + MAX_SCHEDULE_TIMEOUT) < 0) > + err = -EINTR; > > + rcu_read_lock(); > + i915_request_put(rq); > + } > + break; > + } > + } > + rcu_read_unlock(); > + > + return err; > +} > + > #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) > #include "selftest_context.c" > #endif > diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h > index 0a8d553da3f43..f919a66cebf5b 100644 > --- a/drivers/gpu/drm/i915/gt/intel_context.h > +++ b/drivers/gpu/drm/i915/gt/intel_context.h > @@ -226,6 +226,8 @@ static inline void intel_context_exit(struct intel_context *ce) > ce->ops->exit(ce); > } > > +int intel_context_throttle(const struct intel_context *ce); > + > static inline struct intel_context *intel_context_get(struct intel_context *ce) > { > kref_get(&ce->ref); > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c > index 630a732aaecca..72aed544f8714 100644 > --- a/drivers/gpu/drm/i915/i915_request.c > +++ b/drivers/gpu/drm/i915/i915_request.c > @@ -1034,6 +1034,9 @@ i915_request_create(struct intel_context *ce) > struct i915_request *rq; > struct intel_timeline *tl; > > + if (intel_context_throttle(ce)) > + return ERR_PTR(-EINTR); > + > tl = intel_context_timeline_lock(ce); > if (IS_ERR(tl)) > return ERR_CAST(tl);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 2aa63ec521b89..59cd612a23561 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -626,6 +626,47 @@ bool intel_context_revoke(struct intel_context *ce) return ret; } +int intel_context_throttle(const struct intel_context *ce) +{ + const struct intel_ring *ring = ce->ring; + const struct intel_timeline *tl = ce->timeline; + struct i915_request *rq; + int err = 0; + + if (READ_ONCE(ring->space) >= SZ_1K) + return 0; + + rcu_read_lock(); + list_for_each_entry_reverse(rq, &tl->requests, link) { + if (__i915_request_is_complete(rq)) + break; + + if (rq->ring != ring) + continue; + + /* Wait until there will be enough space following that rq */ + if (__intel_ring_space(rq->postfix, + ring->emit, + ring->size) < ring->size / 2) { + if (i915_request_get_rcu(rq)) { + rcu_read_unlock(); + + if (i915_request_wait(rq, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT) < 0) + err = -EINTR; + + rcu_read_lock(); + i915_request_put(rq); + } + break; + } + } + rcu_read_unlock(); + + return err; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_context.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 0a8d553da3f43..f919a66cebf5b 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -226,6 +226,8 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } +int intel_context_throttle(const struct intel_context *ce); + static inline struct intel_context *intel_context_get(struct intel_context *ce) { kref_get(&ce->ref); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 630a732aaecca..72aed544f8714 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1034,6 +1034,9 @@ i915_request_create(struct intel_context *ce) struct i915_request *rq; struct intel_timeline *tl; + if (intel_context_throttle(ce)) + return ERR_PTR(-EINTR); + tl = intel_context_timeline_lock(ce); if (IS_ERR(tl)) return ERR_CAST(tl);