
drm/i915: Use our own workqueue to avoid wedging the system along with the GPU.

Message ID 1249341067-12467-1-git-send-email-eric@anholt.net
State Accepted

Commit Message

Eric Anholt Aug. 3, 2009, 11:11 p.m. UTC
Signed-off-by: Eric Anholt <eric@anholt.net>
---
 drivers/gpu/drm/i915/i915_dma.c |   15 +++++++++++++--
 drivers/gpu/drm/i915/i915_drv.h |    1 +
 drivers/gpu/drm/i915/i915_gem.c |    4 ++--
 drivers/gpu/drm/i915/i915_irq.c |    5 +++--
 4 files changed, 19 insertions(+), 6 deletions(-)

Comments

Chris Wilson Aug. 6, 2009, 4:20 p.m. UTC | #1
I've just encountered a situation whereby saturating the GPU leaves the
system barely responsive as the system waits for dev->struct_mutex.
Either using a separate work queue or adjusting the retire_request work
handler to use mutex_trylock() makes the system responsive again (though
obviously not the application that is submitting 4s of rendering between
throttles).
-ickle
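
[Illustrative sketch, not part of the patch: the mutex_trylock() variant Chris mentions, applied to the existing i915_gem_retire_work_handler() in i915_gem.c. If struct_mutex is contended the handler simply re-arms itself instead of blocking the workqueue; the field and helper names are assumed from the current code.]

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;

	/* If someone else holds struct_mutex (e.g. a client stuck behind a
	 * saturated GPU), don't block here; try again in a second. */
	if (!mutex_trylock(&dev->struct_mutex)) {
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
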
Bruno Prémont Aug. 6, 2009, 4:44 p.m. UTC | #2
Would either of these changes also prevent the CPU busy-waiting that
happens when running xrandr [KMS enabled]? (It is probably the EDID
reading that causes this.)

For my UP Centrino system with i855GM I get a stall of a big fraction
of a second on each xrandr execution, which at least interrupts/pauses
audio playback.

Bruno



Patch

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8c47831..50d1f78 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1186,6 +1186,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_iomapfree;
 
+	dev_priv->wq = create_workqueue("i915");
+	if (dev_priv->wq == NULL) {
+		DRM_ERROR("Failed to create our workqueue.\n");
+		ret = -ENOMEM;
+		goto out_iomapfree;
+	}
+
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
@@ -1211,7 +1218,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = i915_init_phys_hws(dev);
 		if (ret != 0)
-			goto out_iomapfree;
+			goto out_workqueue_free;
 	}
 
 	i915_get_mem_freq(dev);
@@ -1245,7 +1252,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_rmmap;
+			goto out_workqueue_free;
 		}
 	}
 
@@ -1256,6 +1263,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	return 0;
 
+out_workqueue_free:
+	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
@@ -1269,6 +1278,8 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	destroy_workqueue(dev_priv->wq);
+
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5f3a259..7537f57 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -231,6 +231,7 @@ typedef struct drm_i915_private {
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
+	struct workqueue_struct *wq;
 
 	/* Register state */
 	u8 saveLBB;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5bf4203..140bee1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1570,7 +1570,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	}
 
 	if (was_empty && !dev_priv->mm.suspended)
-		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	return seqno;
 }
 
@@ -1719,7 +1719,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	i915_gem_retire_requests(dev);
 	if (!dev_priv->mm.suspended &&
 	    !list_empty(&dev_priv->mm.request_list))
-		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f340b3f..83aee80 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -482,7 +482,7 @@ static void i915_handle_error(struct drm_device *dev)
 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 	}
 
-	schedule_work(&dev_priv->error_work);
+	queue_work(dev_priv->wq, &dev_priv->error_work);
 }
 
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -560,7 +560,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
 				  hotplug_status);
 			if (hotplug_status & dev_priv->hotplug_supported_mask)
-				schedule_work(&dev_priv->hotplug_work);
+				queue_work(dev_priv->wq,
+					   &dev_priv->hotplug_work);
 
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);