diff mbox

[131/190] drm/i915: Pin the pages first in shmem prepare read/write

Message ID 1452509174-16671-45-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson Jan. 11, 2016, 10:45 a.m. UTC
There is an improbable, but not impossible, case that if we leave the
pages unpinned as we operate on the object, then somebody may steal the
lock and change the cache domains after we have already inspected them.

(Whilst here, avail ourselves of the opportunity to take a couple of
steps to make the two functions look more similar.)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 88 ++++++++++++++++++++++++-----------------
 1 file changed, 51 insertions(+), 37 deletions(-)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index edc00b7c82b1..dcdc5c8a5ba8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -500,13 +500,22 @@  int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (!obj->base.filp)
 		return -EINVAL;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		goto out;
+
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		goto err_unpin;
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-
 		/* If we're not in the cpu read domain, set ourself into the gtt
 		 * read domain and manually flush cachelines (if required). This
 		 * optimizes for the case when the gpu will dirty the data
@@ -515,26 +524,25 @@  int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 							obj->cache_level);
 	}
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	i915_gem_object_pin_pages(obj);
-
 	if (*needs_clflush && !cpu_has_clflush) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, false);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
+
 		*needs_clflush = 0;
 	}
 
+out:
+	/* return with the pages pinned */
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
-				    unsigned *needs_clflush)
+				     unsigned *needs_clflush)
 {
 	int ret;
 
@@ -542,20 +550,27 @@  int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (!obj->base.filp)
 		return -EINVAL;
 
-	i915_gem_object_flush_gtt_write_domain(obj);
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
 
-	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
-		ret = i915_gem_object_wait_rendering(obj, false);
-		if (ret)
-			return ret;
+	i915_gem_object_pin_pages(obj);
 
-		/* If we're not in the cpu write domain, set ourself into the
-		 * gtt write domain and manually flush cachelines (as required).
-		 * This optimizes for the case when the gpu will use the data
-		 * right away and we therefore have to clflush anyway.
-		 */
-		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;
-	}
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		goto out;
+
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		goto err_unpin;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	/* If we're not in the cpu write domain, set ourself into the
+	 * gtt write domain and manually flush cachelines (as required).
+	 * This optimizes for the case when the gpu will use the data
+	 * right away and we therefore have to clflush anyway.
+	 */
+	*needs_clflush |= cpu_write_needs_clflush(obj) << 1;
 
 	/* Same trick applies to invalidate partially written cachelines read
 	 * before writing.
@@ -564,24 +579,23 @@  int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
 							 obj->cache_level);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	i915_gem_object_pin_pages(obj);
-
 	if (*needs_clflush && !cpu_has_clflush) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
+
 		*needs_clflush = 0;
 	}
 
+out:
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->dirty = 1;
+	/* return with the pages pinned */
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 /* Per-page copy function for the shmem pread fastpath.