[4/6] drm/ttm: cope with reserved buffers on swap list in ttm_bo_swapout, v2

Message ID 1354101944-10455-4-git-send-email-maarten.lankhorst@canonical.com

Commit Message

Maarten Lankhorst Nov. 28, 2012, 11:25 a.m. UTC
Replace the while loop with a simple for-each loop, and only run the
delayed destroy cleanup if we can reserve the buffer first.

No race occurs, since the lru lock is never dropped any more. An empty list
and a list full of unreservable buffers both cause -EBUSY to be returned,
which is identical to the previous situation.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 44 ++++++++++++++------------------------------
 1 file changed, 14 insertions(+), 30 deletions(-)
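
Read together, the hunks below make the start of ttm_bo_swapout() look
roughly like this (a sketch assembled from this patch only; it assumes
ret is initialized to -EBUSY above the hunk, as the old
"while (ret == -EBUSY)" condition implies, and it omits the rest of the
function):

        spin_lock(&glob->lru_lock);

        /* Walk the swap LRU and stop at the first buffer we can reserve. */
        list_for_each_entry(bo, &glob->swap_lru, swap) {
                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (!ret)
                        break;
        }

        /* Empty list or nothing reservable: ret is still -EBUSY. */
        if (ret) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        kref_get(&bo->list_kref);

        /* A buffer on the delayed-destroy list is cleaned up, not swapped out. */
        if (!list_empty(&bo->ddestroy)) {
                ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
                kref_put(&bo->list_kref, ttm_bo_release_list);
                return ret;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

Because the reservation attempt is the no-wait variant and runs entirely
under the lru lock, the loop never sleeps, so the lock is never dropped
and the old re-check-after-wakeup comment block goes away.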

Comments

Thomas Hellstrom Nov. 28, 2012, 2:27 p.m. UTC | #1
On 11/28/2012 12:25 PM, Maarten Lankhorst wrote:
> Replace the while loop with a simple for-each loop, and only run the
> delayed destroy cleanup if we can reserve the buffer first.
>
> No race occurs, since the lru lock is never dropped any more. An empty list
> and a list full of unreservable buffers both cause -EBUSY to be returned,
> which is identical to the previous situation.
>
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c | 44 ++++++++++++++------------------------------
>   1 file changed, 14 insertions(+), 30 deletions(-)
>
For patches 4-6:

Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 02b275b..74b296f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1795,41 +1795,25 @@  static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
 	spin_lock(&glob->lru_lock);
-	while (ret == -EBUSY) {
-		if (unlikely(list_empty(&glob->swap_lru))) {
-			spin_unlock(&glob->lru_lock);
-			return -EBUSY;
-		}
-
-		bo = list_first_entry(&glob->swap_lru,
-				      struct ttm_buffer_object, swap);
-		kref_get(&bo->list_kref);
-
-		if (!list_empty(&bo->ddestroy)) {
-			ttm_bo_reserve_locked(bo, false, false, false, 0);
-			ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+	list_for_each_entry(bo, &glob->swap_lru, swap) {
+		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
 
-			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			continue;
-		}
+	if (ret) {
+		spin_unlock(&glob->lru_lock);
+		return ret;
+	}
 
-		/**
-		 * Reserve buffer. Since we unlock while sleeping, we need
-		 * to re-check that nobody removed us from the swap-list while
-		 * we slept.
-		 */
+	kref_get(&bo->list_kref);
 
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&glob->lru_lock);
-			ttm_bo_wait_unreserved(bo, false);
-			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-		}
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		return ret;
 	}
 
-	BUG_ON(ret != 0);
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);