
[v2,2/4] drm/ttm: replace drm_mm_pre_get() by direct alloc

Message ID 1374925079-2068-1-git-send-email-dh.herrmann@gmail.com (mailing list archive)
State New, archived

Commit Message

David Herrmann July 27, 2013, 11:37 a.m. UTC
Instead of calling drm_mm_pre_get() in a loop, we now preallocate the node
ourselves and pass it to drm_mm_insert_node_in_range(), which operates on
caller-allocated nodes. This has the exact same semantics and there is no
reason to keep using the racy pre-allocations.

Note that ttm_bo_man_get_node() does not run in atomic context. Nouveau
already uses a GFP_KERNEL allocation in nouveau_gart_manager_new() in
nouveau/nouveau_ttm.c, so we can do the same in ttm_bo_man_get_node().

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/ttm/ttm_bo_manager.c | 42 +++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 22 deletions(-)
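
As a caller-side illustration (not part of the patch; the helper name, the
lock parameter and the error handling are made up for this sketch), the
pattern the code below switches to is roughly:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <drm/drm_mm.h>

/*
 * Illustrative helper only: allocate the node with GFP_KERNEL outside the
 * lock, then insert it into [start, end) under the manager lock.
 * drm_mm_insert_node_in_range() does not allocate memory itself, so it is
 * safe to call with the spinlock held.
 */
static struct drm_mm_node *example_get_node(struct drm_mm *mm,
					    spinlock_t *lock,
					    unsigned long num_pages,
					    unsigned alignment,
					    unsigned long start,
					    unsigned long end)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* may sleep, lock not held */
	if (!node)
		return ERR_PTR(-ENOMEM);

	spin_lock(lock);
	ret = drm_mm_insert_node_in_range(mm, node, num_pages, alignment,
					  start, end, DRM_MM_SEARCH_BEST);
	spin_unlock(lock);

	if (ret) {
		kfree(node);
		return ERR_PTR(ret);
	}
	return node;
}

The teardown path mirrors ttm_bo_man_put_node() below: drm_mm_remove_node()
under the lock, followed by kfree() of the node.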

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4be29e..c58eba33 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,29 +61,25 @@  static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	lpfn = placement->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
-	do {
-		ret = drm_mm_pre_get(mm);
-		if (unlikely(ret))
-			return ret;
 
-		spin_lock(&rman->lock);
-		node = drm_mm_search_free_in_range(mm,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn,
-					DRM_MM_SEARCH_BEST);
-		if (unlikely(node == NULL)) {
-			spin_unlock(&rman->lock);
-			return 0;
-		}
-		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-						     mem->page_alignment,
-						     placement->fpfn,
-						     lpfn);
-		spin_unlock(&rman->lock);
-	} while (node == NULL);
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	spin_lock(&rman->lock);
+	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+					  mem->page_alignment,
+					  placement->fpfn, lpfn,
+					  DRM_MM_SEARCH_BEST);
+	spin_unlock(&rman->lock);
+
+	if (unlikely(ret)) {
+		kfree(node);
+	} else {
+		mem->mm_node = node;
+		mem->start = node->start;
+	}
 
-	mem->mm_node = node;
-	mem->start = node->start;
 	return 0;
 }
 
@@ -94,8 +90,10 @@  static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 
 	if (mem->mm_node) {
 		spin_lock(&rman->lock);
-		drm_mm_put_block(mem->mm_node);
+		drm_mm_remove_node(mem->mm_node);
 		spin_unlock(&rman->lock);
+
+		kfree(mem->mm_node);
 		mem->mm_node = NULL;
 	}
 }