@@ -626,6 +626,63 @@ bool intel_context_revoke(struct intel_context *ce)
 	return ret;
 }
 
+/**
+ * intel_context_throttle - stall until the context's ring has space
+ * @ce: the context whose ring occupancy is checked
+ *
+ * If the ring is nearly full, wait (interruptibly) for the request
+ * holding the other half of the ring to be retired, preventing one
+ * client from flooding the ring with work.
+ *
+ * Returns: 0 when enough ring space is (or becomes) available, or a
+ * negative error code if the wait was interrupted by a signal.
+ */
+int intel_context_throttle(const struct intel_context *ce)
+{
+	const struct intel_ring *ring = ce->ring;
+	const struct intel_timeline *tl = ce->timeline;
+	struct i915_request *rq;
+	int err = 0;
+
+	/* Fast path: plenty of space left in the ring. */
+	if (READ_ONCE(ring->space) >= SZ_1K)
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_reverse(rq, &tl->requests, link) {
+		if (__i915_request_is_complete(rq))
+			break;
+
+		if (rq->ring != ring)
+			continue;
+
+		/* Wait until there will be enough space following that rq */
+		if (__intel_ring_space(rq->postfix,
+				       ring->emit,
+				       ring->size) < ring->size / 2) {
+			if (i915_request_get_rcu(rq)) {
+				long timeout;
+
+				rcu_read_unlock();
+
+				/* Propagate the real error, not just -EINTR */
+				timeout = i915_request_wait(rq,
+							    I915_WAIT_INTERRUPTIBLE,
+							    MAX_SCHEDULE_TIMEOUT);
+				if (timeout < 0)
+					err = timeout;
+
+				rcu_read_lock();
+				i915_request_put(rq);
+			}
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return err;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_context.c"
 #endif
@@ -226,6 +226,8 @@ static inline void intel_context_exit(struct intel_context *ce)
 	ce->ops->exit(ce);
 }
 
+int intel_context_throttle(const struct intel_context *ce);
+
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
 	kref_get(&ce->ref);
@@ -1035,6 +1035,12 @@ i915_request_create(struct intel_context *ce)
 	struct i915_request *rq;
 	struct intel_timeline *tl;
+	int err;
 
+	/* Throttle misbehaving clients before reserving ring space. */
+	err = intel_context_throttle(ce);
+	if (err)
+		return ERR_PTR(err);
+
 	tl = intel_context_timeline_lock(ce);
 	if (IS_ERR(tl))
 		return ERR_CAST(tl);