@@ -1149,6 +1149,9 @@ int i915_driver_unload(struct drm_device *dev)
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
+ /* Cancel the scheduler work handler, which should be idle now. */
+ cancel_work_sync(&dev_priv->mm.scheduler_work);
+
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1285,6 +1285,16 @@ struct i915_gem_mm {
struct delayed_work retire_work;
/**
+ * New scheme is to get an interrupt after every work packet
+ * in order to allow the low latency scheduling of pending
+ * packets. The idea behind adding new packets to a pending
+ * queue rather than directly into the hardware ring buffer
+ * is to allow high priority packets to overtake low priority
+ * ones.
+ */
+ struct work_struct scheduler_work;
+
+ /**
* When we detect an idle GPU, we want to turn on
* powersaving features. So once we see that there
* are no more requests outstanding and no more
@@ -5389,6 +5389,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
i915_gem_idle_work_handler);
+ INIT_WORK(&dev_priv->mm.scheduler_work,
+ i915_gem_scheduler_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
@@ -358,10 +358,12 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
*/
void i915_scheduler_wakeup(struct drm_device *dev)
{
- /* XXX: Need to call i915_scheduler_remove() via work handler. */
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ queue_work(dev_priv->wq, &dev_priv->mm.scheduler_work);
}
-int i915_scheduler_remove(struct intel_engine_cs *ring)
+static int i915_scheduler_remove(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_scheduler *scheduler = dev_priv->scheduler;
@@ -487,6 +489,22 @@ int i915_scheduler_remove(struct intel_engine_cs *ring)
return ret;
}
+/*
+ * Work handler queued by i915_scheduler_wakeup(); runs
+ * i915_scheduler_remove() on each ring from the driver workqueue.
+ */
+void i915_gem_scheduler_work_handler(struct work_struct *work)
+{
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
+	int i;
+
+	dev_priv = container_of(work, struct drm_i915_private, mm.scheduler_work);
+
+	for_each_ring(ring, dev_priv, i)
+		i915_scheduler_remove(ring);
+}
+
static void i915_scheduler_priority_bump_clear(struct i915_scheduler *scheduler)
{
struct i915_scheduler_queue_entry *node;
@@ -90,5 +90,6 @@ int i915_scheduler_closefile(struct drm_device *dev,
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
void i915_scheduler_wakeup(struct drm_device *dev);
+void i915_gem_scheduler_work_handler(struct work_struct *work);
#endif /* _I915_SCHEDULER_H_ */