@@ -3029,7 +3029,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct intel_rps_client *rps);
+ struct intel_rps_client *rps,
+ bool is_locked);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
@@ -1207,7 +1207,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps,
+ bool is_locked)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
@@ -1217,8 +1218,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
- int ret;
+ int ret = 0;
+ bool busy;
+ might_sleep();
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
if (i915_gem_request_completed(req))
@@ -1269,6 +1272,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
break;
}
+ if (is_locked) {
+ /* If this request is being processed by the scheduler
+ * then it is unsafe to sleep with the mutex lock held
+ * as the scheduler may require the lock in order to
+ * progress the request. */
+ if (i915_scheduler_is_request_tracked(req, NULL, &busy)) {
+ if (busy) {
+ ret = -EAGAIN;
+ break;
+ }
+ }
+
+		/* If the request is not tracked by the scheduler then fall
+		 * through to the regular completion test below. */
+ }
+
if (i915_gem_request_completed(req)) {
ret = 0;
break;
@@ -1449,13 +1468,17 @@ i915_wait_request(struct drm_i915_gem_request *req)
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ ret = i915_scheduler_flush_request(req, true);
+ if (ret < 0)
+ return ret;
+
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
+ interruptible, NULL, NULL, true);
if (ret)
return ret;
@@ -1568,7 +1591,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
for (i = 0; ret == 0 && i < n; i++)
ret = __i915_wait_request(requests[i], reset_counter, true,
- NULL, rps);
+ NULL, rps, false);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
@@ -3493,7 +3516,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- file->driver_priv);
+ file->driver_priv, false);
i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3526,7 +3549,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
- &i915->rps.semaphores);
+ &i915->rps.semaphores, true);
if (ret)
return ret;
@@ -4485,7 +4508,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, reset_counter, true, NULL, NULL, false);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -31,6 +31,8 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *schedu
struct i915_scheduler_queue_entry *remove);
static int i915_scheduler_submit(struct intel_engine_cs *ring,
bool is_locked);
+static int i915_scheduler_submit_max_priority(struct intel_engine_cs *ring,
+ bool is_locked);
static uint32_t i915_scheduler_count_flying(struct i915_scheduler *scheduler,
struct intel_engine_cs *ring);
static void i915_scheduler_priority_bump_clear(struct i915_scheduler *scheduler);
@@ -554,6 +556,57 @@ void i915_gem_scheduler_work_handler(struct work_struct *work)
}
}
+/**
+ * i915_scheduler_flush_request - expedite a specific queued request
+ * @req: the request that should reach the hardware as soon as possible
+ * @is_locked: true if the caller holds the driver's struct_mutex
+ *
+ * Bumps the priority of the given request (and, via the bump helper, the
+ * nodes it depends on) to the maximum level and then submits all
+ * max-priority work on that ring.
+ *
+ * Returns a negative error code on failure, zero if there was nothing to
+ * do, otherwise the count returned by the max-priority submission pass.
+ */
+int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
+				 bool is_locked)
+{
+	struct drm_i915_private *dev_priv;
+	struct i915_scheduler *scheduler;
+	unsigned long flags;
+	int flush_count = 0;
+	uint32_t ring_id;
+
+	if (!req)
+		return -EINVAL;
+
+	dev_priv = req->ring->dev->dev_private;
+	scheduler = dev_priv->scheduler;
+
+	if (!scheduler)
+		return 0;
+
+	if (!req->scheduler_qe)
+		return 0;
+
+	ring_id = req->ring->id;
+	if (is_locked && (scheduler->flags[ring_id] & i915_sf_submitting)) {
+		/*
+		 * Scheduler is busy already submitting another batch,
+		 * come back later rather than going recursive...
+		 */
+		return -EAGAIN;
+	}
+
+	/*
+	 * The queued test must be done under the scheduler lock, otherwise
+	 * the node state can change between the check and the priority bump
+	 * below. A node that is queued is by definition on the node_queue,
+	 * so no separate list_empty() test is required.
+	 */
+	spin_lock_irqsave(&scheduler->lock, flags);
+
+	if (I915_SQS_IS_QUEUED(req->scheduler_qe)) {
+		i915_scheduler_priority_bump_clear(scheduler);
+		flush_count = i915_scheduler_priority_bump(scheduler,
+				req->scheduler_qe,
+				scheduler->priority_level_max);
+	}
+
+	spin_unlock_irqrestore(&scheduler->lock, flags);
+
+	if (flush_count) {
+		DRM_DEBUG_DRIVER("<%s> Bumped %d entries\n",
+				 req->ring->name, flush_count);
+		flush_count = i915_scheduler_submit_max_priority(req->ring,
+								 is_locked);
+	}
+
+	return flush_count;
+}
+
static void i915_scheduler_priority_bump_clear(struct i915_scheduler *scheduler)
{
struct i915_scheduler_queue_entry *node;
@@ -607,6 +660,44 @@ static int i915_scheduler_priority_bump(struct i915_scheduler *scheduler,
return count;
}
+/*
+ * i915_scheduler_submit_max_priority - drain all max-priority work
+ * @ring: engine whose queue should be drained of max-priority nodes
+ * @is_locked: true if the caller already holds struct_mutex
+ *
+ * Repeatedly submits batches for the given ring while at least one queued
+ * node sits at the maximum priority level. Returns the total count
+ * accumulated from i915_scheduler_submit(), or a negative error code.
+ */
+static int i915_scheduler_submit_max_priority(struct intel_engine_cs *ring,
+ bool is_locked)
+{
+ struct i915_scheduler_queue_entry *node;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+ unsigned long flags;
+ int ret, count = 0;
+ bool found;
+
+ do {
+ found = false;
+ /* Scan under the lock for any queued node at max priority. */
+ spin_lock_irqsave(&scheduler->lock, flags);
+ list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
+ if (!I915_SQS_IS_QUEUED(node))
+ continue;
+
+ if (node->priority < scheduler->priority_level_max)
+ continue;
+
+ found = true;
+ break;
+ }
+ spin_unlock_irqrestore(&scheduler->lock, flags);
+
+ if (!found)
+ break;
+
+ /* NOTE(review): the lock is dropped before submitting, so the
+ * found node may change state; this relies on
+ * i915_scheduler_submit() re-validating queue state — confirm. */
+ ret = i915_scheduler_submit(ring, is_locked);
+ if (ret < 0)
+ return ret;
+
+ count += ret;
+ } while (found);
+
+ return count;
+}
+
static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
struct i915_scheduler_queue_entry **pop_node,
unsigned long *flags)
@@ -821,6 +912,26 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *scheduler,
return 0;
}
+/*
+ * i915_scheduler_is_request_tracked - query scheduler state of a request
+ * @req: request to look up (must be non-NULL with a valid ring)
+ * @completed: optional out flag, set true if the node has completed
+ * @busy: optional out flag, set true if the node is still queued
+ *
+ * Returns false when the scheduler is disabled or the request has no
+ * scheduler queue entry; otherwise fills in the requested flags and
+ * returns true.
+ *
+ * NOTE(review): scheduler_qe state is read without scheduler->lock, so
+ * the flags are only a snapshot — confirm callers tolerate the race.
+ */
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+ bool *completed, bool *busy)
+{
+ struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+ if (!scheduler)
+ return false;
+
+ if (req->scheduler_qe == NULL)
+ return false;
+
+ if (completed)
+ *completed = I915_SQS_IS_COMPLETE(req->scheduler_qe);
+ if (busy)
+ *busy = I915_SQS_IS_QUEUED(req->scheduler_qe);
+
+ return true;
+}
+
int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
{
struct i915_scheduler_queue_entry *node;
@@ -92,5 +92,9 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *q
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);
void i915_gem_scheduler_work_handler(struct work_struct *work);
+int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
+ bool is_locked);
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+ bool *completed, bool *busy);
#endif /* _I915_SCHEDULER_H_ */
@@ -11258,7 +11258,8 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
WARN_ON(__i915_wait_request(mmio_flip->req,
mmio_flip->crtc->reset_counter,
false, NULL,
- &mmio_flip->i915->rps.mmioflips));
+ &mmio_flip->i915->rps.mmioflips,
+ false));
i915_gem_request_unreference(mmio_flip->req);
}
@@ -13258,7 +13259,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
ret = __i915_wait_request(intel_plane_state->wait_req,
reset_counter, true,
- NULL, NULL);
+ NULL, NULL, false);
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret == -EIO)
@@ -2294,7 +2294,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
return __i915_wait_request(req,
atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
to_i915(ring->dev)->mm.interruptible,
- NULL, NULL);
+ NULL, NULL, true);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)