
[10/10] drm/ttm: remove reliance on ttm_bo_wait_unreserved

Message ID: 1352728811-21860-10-git-send-email-maarten.lankhorst@canonical.com
State: New, archived

Commit Message

Maarten Lankhorst Nov. 12, 2012, 2 p.m. UTC
This makes things slightly more complicated, but instead of testing
whether the buffer is unreserved and starting over, try to block and
acquire the reservation first, then start over.

This maps a lot better to a blocking acquire operation.
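
In outline, the reservation path changes like this (an illustrative
sketch only; list bookkeeping and error paths are omitted):

old:
	ret = ttm_bo_reserve(bo, true, false, true, sequence);
	if (ret == -EAGAIN) {
		/* back off all reservations held so far, then wait
		 * and race with everyone else to re-reserve */
		ttm_bo_wait_unreserved(bo, true);
		goto retry;
	}

new:
	ret = ttm_bo_reserve(bo, true, false, true, sequence);
	if (ret == -EAGAIN) {
		/* back off all reservations held so far, then block
		 * until we actually own the contended buffer */
		ret = ttm_bo_reserve_slowpath(bo, true, sequence);
		if (ret)
			return ret;
		/* bo stays reserved; skip it on the retry pass */
		goto retry;
	}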

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/nouveau/nouveau_gem.c  | 19 +++++++++---
 drivers/gpu/drm/ttm/ttm_bo.c           | 33 +++++++++++++++++++--
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 53 ++++++++++++++++++++--------------
 include/drm/ttm/ttm_bo_driver.h        | 24 +++++++--------
 4 files changed, 89 insertions(+), 40 deletions(-)

Patch

diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7b9364b..6f58604 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,6 +321,7 @@  validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
 
 	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
@@ -341,6 +342,11 @@  retry:
 			return -ENOENT;
 		}
 		nvbo = gem->driver_private;
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
+		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 			NV_ERROR(drm, "multiple instances of buffer %d on "
@@ -353,15 +359,18 @@  retry:
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (unlikely(ret == -EAGAIN))
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
-			drm_gem_object_unreference_unlocked(gem);
+			if (unlikely(ret == -EAGAIN)) {
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      sequence);
+				if (!ret)
+					res_bo = nvbo;
+			}
 			if (unlikely(ret)) {
+				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
 					NV_ERROR(drm, "fail reserve\n");
 				return ret;
 			}
-			goto retry;
 		}
 
 		b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -383,6 +392,8 @@  retry:
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
+		if (nvbo == res_bo)
+			goto retry;
 	}
 
 	return 0;
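
The res_bo handling above, in outline (a simplified sketch of the
patched validate_init() loop; GEM refcounting and error paths elided):

	retry:
		list_for_each_entry(...) {
			nvbo = gem->driver_private;
			if (nvbo == res_bo) {
				/* reserved via the slowpath on the last pass */
				res_bo = NULL;
				continue;
			}
			ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
			if (ret == -EAGAIN) {
				validate_fini(op, NULL);	/* drop what we hold */
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      sequence);
				if (ret)
					return ret;
				res_bo = nvbo;	/* keep it reserved across the retry */
			}
			/* ... add nvbo to the validate lists ... */
			if (nvbo == res_bo)
				goto retry;
		}
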
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e57dae5..bc90f6b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -166,7 +166,8 @@  static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible)
 {
 	if (interruptible) {
 		return wait_event_interruptible(bo->event_queue,
@@ -176,7 +177,6 @@  int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 		return 0;
 	}
 }
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
@@ -320,6 +320,35 @@  int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	return ret;
 }
 
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count = 0;
+	int ret;
+
+	WARN_ON(!list_empty_careful(&bo->ddestroy));
+
+	ret = ttm_bo_reserve_nolru(bo, interruptible, false, false, 0);
+	if (likely(ret == 0)) {
+		/*
+		 * Wake up waiters that may need to recheck for deadlock,
+		 * since we unset seq_valid in ttm_bo_reserve_nolru.
+		 */
+		bo->val_seq = sequence;
+		bo->seq_valid = true;
+		wake_up_all(&bo->event_queue);
+
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
 	ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index b3fe824..de1504f 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@  static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -145,34 +129,59 @@  int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		WARN_ON(!atomic_read(&bo->kref.refcount));
-retry_this_bo:
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
 		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (ret != -EAGAIN) {
+				ttm_eu_backoff_reservation_locked(list);
 				spin_unlock(&glob->lru_lock);
 				ttm_eu_list_ref_sub(list);
 				return ret;
 			}
-			goto retry_this_bo;
+
+			/* fallthrough */
 		case -EAGAIN:
+			/* uh oh, we lost out, drop every reservation and try
+			 * to only reserve this buffer, then start over if
+			 * this succeeds.
+			 */
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			entry->removed = entry->reserved = true;
+			spin_lock(&glob->lru_lock);
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ttm_eu_backoff_reservation_locked(list);
+				spin_unlock(&glob->lru_lock);
+				ttm_eu_list_ref_sub(list);
+				return -EBUSY;
+			}
 			goto retry;
 		default:
 			ttm_eu_backoff_reservation_locked(list);
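
The resulting loop in ttm_eu_reserve_buffers(), in outline (a
simplified sketch of the logic above; locking and LRU/ref bookkeeping
elided):

	retry:
		list_for_each_entry(entry, list, head) {
			if (entry->reserved)	/* taken via the slowpath */
				continue;
			/* try-acquire first ... */
			ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
			if (ret == -EBUSY)	/* ... then blocking acquire */
				ret = ttm_bo_reserve_nolru(bo, true, false,
							   true, val_seq);
			if (ret == -EAGAIN) {
				/* an older sequence holds bo: back everything
				 * off, block until we own bo, then start over */
				ttm_eu_backoff_reservation_locked(list);
				ret = ttm_bo_reserve_slowpath(bo, true, val_seq);
				if (ret)
					return ret;
				entry->removed = entry->reserved = true;
				goto retry;
			}
		}
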
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e9cdae1..26af40c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -824,6 +824,18 @@  extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->val_seq to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+				   bool interruptible, uint32_t sequence);
 
 /**
  * ttm_bo_reserve_nolru:
@@ -871,18 +883,6 @@  extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
  */
 extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_wait_unreserved
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
- */
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible);
-
 /*
  * ttm_bo_util.c
  */
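
A hypothetical caller of ttm_bo_reserve_slowpath(), mirroring the
nouveau change above; my_entry and unreserve_all() are illustrative
stand-ins for driver bookkeeping, not TTM API:

	static int reserve_list(struct list_head *list, uint32_t seq)
	{
		struct ttm_buffer_object *slow = NULL;
		struct my_entry *e;
		int ret;

	retry:
		list_for_each_entry(e, list, head) {
			if (e->bo == slow) {
				/* already reserved via the slowpath below */
				slow = NULL;
				continue;
			}

			ret = ttm_bo_reserve(e->bo, true, false, true, seq);
			if (ret == -EAGAIN) {
				/* deadlock risk: drop every reservation we hold */
				unreserve_all(list);
				/* with nothing held, blocking cannot deadlock */
				ret = ttm_bo_reserve_slowpath(e->bo, true, seq);
				if (ret)
					return ret;
				slow = e->bo;
				goto retry;
			}
			if (ret)
				return ret;
		}
		return 0;
	}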