@@ -68,7 +68,7 @@ static signed long i915_fence_wait(struct fence *fence,
ret = __i915_wait_request(to_i915_request(fence),
interruptible, timeout,
- NULL);
+ NO_WAITBOOST);
if (ret == -ETIME)
return 0;
@@ -621,7 +621,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (INTEL_INFO(req->i915)->gen >= 6)
+ if (!IS_ERR(rps) && INTEL_INFO(req->i915)->gen >= 6)
gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
intel_wait_init(&wait, req->fence.seqno);
@@ -691,7 +691,7 @@ complete:
*timeout = 0;
}
- if (ret == 0 && rps &&
+ if (ret == 0 && !IS_ERR_OR_NULL(rps) &&
req->fence.seqno == req->ring->last_submitted_seqno) {
/* The GPU is now idle and this client has stalled.
* Since no other client has submitted a request in the
@@ -179,6 +179,7 @@ void __i915_add_request(struct drm_i915_gem_request *req,
__i915_add_request(req, NULL, false)
struct intel_rps_client;
+#define NO_WAITBOOST ERR_PTR(-1)
int __i915_wait_request(struct drm_i915_gem_request *req,
bool interruptible,
We want to restrict waitboosting to known process contexts, where we can
track which clients are receiving waitboosts and prevent excessive power
wasting. For fence_wait() we do not have any client tracking and so that
leaves it open to abuse.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_request.c | 6 +++---
 drivers/gpu/drm/i915/i915_gem_request.h | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)