
[04/29] drm/i915: Try harder to allocate an mmap_offset

Message ID 1344696088-24760-5-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Aug. 11, 2012, 2:41 p.m. UTC
Given the persistence of an offset for the lifetime of an object, it is
easy to contemplate how the mmap space becomes badly fragmented to the
point that further allocations fail with ENOSPC. Our only recourse at
this point is to try to purge the objects to release some space and
reattempt the allocation.

References: https://bugs.freedesktop.org/show_bug.cgi?id=39552
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   50 ++++++++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 9 deletions(-)
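
For reference, a minimal userspace sketch (not i915 code) of the escalating
fallback the commit message describes: attempt the allocation, and on -ENOSPC
reclaim progressively more aggressively before retrying. try_alloc(),
purge_purgeable() and shrink_all() are hypothetical stand-ins for
drm_gem_create_mmap_offset(), i915_gem_purge() and i915_gem_shrink_all().

#include <errno.h>
#include <stdio.h>

static int failures = 2;

static int try_alloc(void)
{
	/* Pretend the mmap space is fragmented until enough has been freed. */
	return failures-- > 0 ? -ENOSPC : 0;
}

static void purge_purgeable(void) { puts("purging purgeable objects"); }
static void shrink_all(void)      { puts("shrinking everything"); }

static int alloc_with_fallback(void)
{
	int ret = try_alloc();
	if (ret != -ENOSPC)
		return ret;		/* success, or an unrelated error */

	purge_purgeable();		/* cheapest reclaim first */
	ret = try_alloc();
	if (ret != -ENOSPC)
		return ret;

	shrink_all();			/* last resort */
	return try_alloc();
}

int main(void)
{
	printf("alloc_with_fallback() = %d\n", alloc_with_fallback());
	return 0;
}

The ordering matters: purging only purgeable objects is the cheap step that
cannot break userspace expectations, while the full shrink is kept as the
heavyweight last resort.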

Comments

Daniel Vetter Aug. 20, 2012, 9:37 a.m. UTC | #1
On Sat, Aug 11, 2012 at 03:41:03PM +0100, Chris Wilson wrote:
> Given the persistence of an offset for the lifetime of an object, it is
> easy to contemplate how the mmap space becomes badly fragmented to the
> point that further allocations fail with ENOSPC. Our only recourse at
> this point is to try to purge the objects to release some space and
> reattempt the allocation.
> 
> References: https://bugs.freedesktop.org/show_bug.cgi?id=39552
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Ok, I've picked up everything up to this patch, with the few changes applied
as discussed (plus ditching an unused var that I'd forgotten to kill when
purging the put_pages from gem_reset). I'll look at the others later
today.

Thanks for the patches,
-Daniel
Chris Wilson Aug. 20, 2012, 11:31 a.m. UTC | #2
On Mon, 20 Aug 2012 11:37:30 +0200, Daniel Vetter <daniel@ffwll.ch> wrote:
> Ok, I've picked up everything up to this patch, with the few changes applied
> as discussed (plus ditching an unused var that I'd forgotten to kill when
> purging the put_pages from gem_reset). I'll look at the others later
> today.

I think that's a good point to hand over to QA for the first round of
testing, especially the swap-thrashing tests and the sporadic failures
that show up afterwards.
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3a7ac38..0e0fc1e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1276,6 +1276,42 @@  i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (obj->base.map_list.map)
+		return 0;
+
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		return ret;
+
+	/* Badly fragmented mmap space? The only way we can recover
+	 * space is by destroying unwanted objects. We can't randomly release
+	 * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can do is to release the
+	 * offsets on purgeable objects by truncating it and marking it purged,
+	 * which prevents userspace from ever using that object again.
+	 */
+	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		return ret;
+
+	i915_gem_shrink_all(dev_priv);
+	return drm_gem_create_mmap_offset(&obj->base);
+}
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	if (!obj->base.map_list.map)
+		return;
+
+	drm_gem_free_mmap_offset(&obj->base);
+}
+
 int
 i915_gem_mmap_gtt(struct drm_file *file,
 		  struct drm_device *dev,
@@ -1307,11 +1343,9 @@  i915_gem_mmap_gtt(struct drm_file *file,
 		goto out;
 	}
 
-	if (!obj->base.map_list.map) {
-		ret = drm_gem_create_mmap_offset(&obj->base);
-		if (ret)
-			goto out;
-	}
+	ret = i915_gem_object_create_mmap_offset(obj);
+	if (ret)
+		goto out;
 
 	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
@@ -1360,8 +1394,7 @@  i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	inode = obj->base.filp->f_path.dentry->d_inode;
 	shmem_truncate_range(inode, 0, (loff_t)-1);
 
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
+	i915_gem_object_free_mmap_offset(obj);
 
 	obj->madv = __I915_MADV_PURGED;
 }
@@ -3614,8 +3647,7 @@  void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	}
 
 	i915_gem_object_put_pages_gtt(obj);
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
+	i915_gem_object_free_mmap_offset(obj);
 
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
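
For context, a rough userspace sketch (not part of the patch) of the path that
exercises i915_gem_mmap_gtt() above: ask the kernel for the object's fake
(mmap) offset, then mmap the DRM fd at that offset. map_gtt() and its fd/handle
parameters are illustrative assumptions; error handling is minimal.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <drm/i915_drm.h>

static void *map_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* An -ENOSPC failure here is what this patch tries to avoid. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)arg.offset);
}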