[2/3] drm/i915: Repeat unbinding during free if interrupted (v6)

Message ID: 1279923531-13088-3-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State: Deferred, archived

Commit Message

Chris Wilson, July 23, 2010, 10:18 p.m. UTC

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ca05fba..e3ff35e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -547,6 +547,14 @@  typedef struct drm_i915_private {
 		struct list_head fence_list;
 
 		/**
+		 * List of objects currently pending being freed.
+		 *
+		 * These objects are no longer in use, but due to a signal
+		 * we were prevented from freeing them at the appointed time.
+		 */
+		struct list_head deferred_free_list;
+
+		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b7c4ae1..4a21053 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,7 @@  static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
@@ -1755,6 +1756,20 @@  i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+	    struct drm_i915_gem_object *obj_priv, *tmp;
+
+	    /* We must be careful that during unbind() we do not
+	     * accidentally infinitely recurse into retire requests.
+	     * Currently:
+	     *   retire -> free -> unbind -> wait -> retire_ring
+	     */
+	    list_for_each_entry_safe(obj_priv, tmp,
+				     &dev_priv->mm.deferred_free_list,
+				     list)
+		    i915_gem_free_object_tail(&obj_priv->base);
+	}
+
 	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
 	if (HAS_BSD(dev))
 		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
@@ -4457,20 +4472,19 @@  int i915_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	int ret;
 
-	trace_i915_gem_object_destroy(obj);
-
-	while (obj_priv->pin_count > 0)
-		i915_gem_object_unpin(obj);
-
-	if (obj_priv->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
-	i915_gem_object_unbind(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == -ERESTARTSYS) {
+		list_move(&obj_priv->list,
+			  &dev_priv->mm.deferred_free_list);
+		return;
+	}
 
 	if (obj_priv->mmap_offset)
 		i915_gem_free_mmap_offset(obj);
@@ -4482,6 +4496,22 @@  void i915_gem_free_object(struct drm_gem_object *obj)
 	kfree(obj_priv);
 }
 
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+	trace_i915_gem_object_destroy(obj);
+
+	while (obj_priv->pin_count > 0)
+		i915_gem_object_unpin(obj);
+
+	if (obj_priv->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	i915_gem_free_object_tail(obj);
+}
+
 /** Unbinds all inactive objects. */
 static int
 i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4755,6 +4785,7 @@  i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
 	if (HAS_BSD(dev)) {
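
As a quick illustration of the pattern the patch introduces, below is a minimal standalone userspace sketch, not the i915 code itself: names such as fake_unbind, free_object_tail and retire_requests are made up for the example. The idea is the same, though: if unbinding is interrupted by a signal, the object is parked on a deferred-free list so the free can be retried, and the retire path drains that list before doing anything else.

/*
 * Standalone sketch of the deferred-free pattern (illustrative names, not
 * the i915 symbols). ERESTARTSYS is a kernel-internal errno value,
 * re-defined here so the example compiles in userspace.
 */
#include <stdio.h>
#include <stdlib.h>

#define ERESTARTSYS 512

struct obj {
	int id;
	struct obj *next;		/* link for the deferred-free list */
};

static struct obj *deferred_free_list;	/* objects whose unbind was interrupted */

/* Stand-in for the unbind step: pretend a signal interrupts the first wait. */
static int fake_unbind(struct obj *o)
{
	static int calls;

	(void)o;
	return calls++ == 0 ? -ERESTARTSYS : 0;
}

/* Tail of the free path: finish freeing, or defer if unbind was interrupted. */
static void free_object_tail(struct obj *o)
{
	if (fake_unbind(o) == -ERESTARTSYS) {
		o->next = deferred_free_list;	/* retry on the next retire pass */
		deferred_free_list = o;
		printf("obj %d: unbind interrupted, free deferred\n", o->id);
		return;
	}
	printf("obj %d: freed\n", o->id);
	free(o);
}

/* Analogue of the retire path: drain the deferred list before retiring rings. */
static void retire_requests(void)
{
	struct obj *o = deferred_free_list;

	deferred_free_list = NULL;
	while (o) {
		struct obj *next = o->next;

		free_object_tail(o);
		o = next;
	}
	/* ... the real driver would retire requests on the rings here ... */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->id = 1;
	free_object_tail(o);	/* first attempt hits the "signal" and defers */
	retire_requests();	/* deferred object is unbound and freed here */
	return 0;
}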