@@ -3029,7 +3029,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct intel_rps_client *rps);
+ struct intel_rps_client *rps,
+ bool is_locked);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
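For readability, a kernel-doc style summary of the extended prototype follows; it is a hedged sketch, with the parameter descriptions (and in particular the is_locked semantics) inferred from the wait-loop change below rather than copied from the patch.

/*
 * __i915_wait_request() - wait until execution of a request has finished
 * @req:           the request to wait upon
 * @reset_counter: GPU reset count sampled before the wait began
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout:       in - how long to wait (NULL means forever); out - time left
 * @rps:           client used for RPS frequency boosting while waiting
 * @is_locked:     true if the caller holds struct_mutex; in that case a
 *                 request that is still queued in the scheduler makes the
 *                 wait return -EAGAIN instead of sleeping, since the
 *                 scheduler may need the mutex to progress the request
 */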
@@ -1207,7 +1207,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps,
+ bool is_locked)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
@@ -1217,8 +1218,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
- int ret;
+ int ret = 0;
+ bool busy;
+ might_sleep();
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
if (i915_gem_request_completed(req))
@@ -1269,6 +1272,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
break;
}
+ if (is_locked) {
+ /* If this request is still being processed by the scheduler
+ * then it is unsafe to sleep here with the mutex held, as the
+ * scheduler may require the lock in order to make progress on
+ * the request. */
+ if (i915_scheduler_is_request_tracked(req, NULL, &busy)) {
+ if (busy) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+
+ /* If the request is not tracked by the scheduler then fall
+ * through to the regular completion test below. */
+ }
+
if (i915_gem_request_completed(req)) {
ret = 0;
break;
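For reference, a minimal caller sketch (not part of the patch) showing the intended convention: pass is_locked = true only when struct_mutex is held, and be prepared to back off on -EAGAIN while the scheduler still owns the request.

	int ret;

	/* Waiting with struct_mutex held: request the early -EAGAIN exit
	 * rather than sleeping under the lock the scheduler may need. */
	ret = __i915_wait_request(req, reset_counter, true, NULL, NULL, true);
	if (ret == -EAGAIN) {
		/* The request is still queued in the scheduler; drop the
		 * lock, let the scheduler submit it, then retry the wait. */
	}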
@@ -1455,7 +1474,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
+ interruptible, NULL, NULL, true);
if (ret)
return ret;
@@ -1568,7 +1587,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
for (i = 0; ret == 0 && i < n; i++)
ret = __i915_wait_request(requests[i], reset_counter, true,
- NULL, rps);
+ NULL, rps, false);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
@@ -3494,7 +3513,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- file->driver_priv);
+ file->driver_priv, false);
i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3527,7 +3546,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
- &i915->rps.semaphores);
+ &i915->rps.semaphores, true);
if (ret)
return ret;
@@ -4486,7 +4505,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, reset_counter, true, NULL, NULL, false);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -847,6 +847,26 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *scheduler,
return 0;
}
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+ bool *completed, bool *busy)
+{
+ struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+ if (!scheduler)
+ return false;
+
+ if (req->scheduler_qe == NULL)
+ return false;
+
+ if (completed)
+ *completed = I915_SQS_IS_COMPLETE(req->scheduler_qe);
+ if (busy)
+ *busy = I915_SQS_IS_QUEUED(req->scheduler_qe);
+
+ return true;
+}
+
int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
{
struct i915_scheduler_queue_entry *node;
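A hedged usage sketch for the new helper, mirroring how the wait loop above consumes it; treating the completed flag as an early-out is an assumption made for illustration, not something this patch wires up.

	bool completed, busy;

	if (i915_scheduler_is_request_tracked(req, &completed, &busy)) {
		if (busy)
			return -EAGAIN;	/* still queued; do not sleep under the lock */
		if (completed)
			return 0;	/* scheduler has already seen it finish */
	}

	/* Untracked request: fall back to i915_gem_request_completed(). */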
@@ -93,5 +93,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *q
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);
void i915_gem_scheduler_work_handler(struct work_struct *work);
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+ bool *completed, bool *busy);
#endif /* _I915_SCHEDULER_H_ */
@@ -11258,7 +11258,8 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
WARN_ON(__i915_wait_request(mmio_flip->req,
mmio_flip->crtc->reset_counter,
false, NULL,
- &mmio_flip->i915->rps.mmioflips));
+ &mmio_flip->i915->rps.mmioflips,
+ false));
i915_gem_request_unreference(mmio_flip->req);
}
@@ -13258,7 +13259,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = __i915_wait_request(intel_plane_state->wait_req,
reset_counter, true,
- NULL, NULL);
+ NULL, NULL, false);
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret == -EIO)
@@ -2294,7 +2294,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
return __i915_wait_request(req,
atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
to_i915(ring->dev)->mm.interruptible,
- NULL, NULL);
+ NULL, NULL, true);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)