
[2/4] drm/ttm: replace drm_mm_pre_get() by direct alloc

Message ID 1374760562-6096-3-git-send-email-dh.herrmann@gmail.com (mailing list archive)
State New, archived

Commit Message

David Herrmann July 25, 2013, 1:56 p.m. UTC
Instead of calling drm_mm_pre_get() in a loop, we now preallocate the node
and then use the atomic insertion functions. This has exactly the same
semantics, and there is no reason to keep using the racy pre-allocations.

Note that ttm_bo_man_get_node() does not run in atomic context. Nouveau already
uses a GFP_KERNEL allocation in nouveau_gart_manager_new() in nouveau/nouveau_ttm.c,
so we can do the same in ttm_bo_man_get_node().

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
---
 drivers/gpu/drm/ttm/ttm_bo_manager.c | 40 +++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 21 deletions(-)
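
For readers who don't know the drm_mm interface, the pattern the patch moves to
looks roughly like the sketch below. The variable names (lock, mm, size,
alignment, start, end) are placeholders, not taken from the TTM code; the
kzalloc/kfree, spin_lock/spin_unlock and drm_mm_* calls are the ones used in the
diff further down.

	/* Pre-allocate the node with GFP_KERNEL outside the manager lock. */
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Insert the pre-allocated node under the lock; no allocation happens here. */
	spin_lock(&lock);
	ret = drm_mm_insert_node_in_range(mm, node, size, alignment,
					  start, end, true);
	spin_unlock(&lock);
	if (ret)
		kfree(node);	/* no suitable hole found; nothing to undo in drm_mm */

	/* Teardown: take the node out of the allocator, then free it. */
	spin_lock(&lock);
	drm_mm_remove_node(node);
	spin_unlock(&lock);
	kfree(node);

Because the node is allocated up front with GFP_KERNEL, the insertion under the
spinlock no longer allocates and therefore cannot fail for lack of memory, which
is what makes the old pre-get/retry loop unnecessary.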

Comments

Daniel Vetter July 25, 2013, 2:25 p.m. UTC | #1
On Thu, Jul 25, 2013 at 03:56:00PM +0200, David Herrmann wrote:
> Instead of calling drm_mm_pre_get() in a loop, we now preallocate the node
> and then use the atomic insertion functions. This has exactly the same
> semantics, and there is no reason to keep using the racy pre-allocations.
> 
> Note that ttm_bo_man_get_node() does not run in atomic context. Nouveau already
> uses a GFP_KERNEL allocation in nouveau_gart_manager_new() in nouveau/nouveau_ttm.c,
> so we can do the same in ttm_bo_man_get_node().
> 
> Signed-off-by: David Herrmann <dh.herrmann@gmail.com>

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f9..cbd2ec7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,24 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	lpfn = placement->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
-	do {
-		ret = drm_mm_pre_get(mm);
-		if (unlikely(ret))
-			return ret;
 
-		spin_lock(&rman->lock);
-		node = drm_mm_search_free_in_range(mm,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(node == NULL)) {
-			spin_unlock(&rman->lock);
-			return 0;
-		}
-		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-						     mem->page_alignment,
-						     placement->fpfn,
-						     lpfn);
-		spin_unlock(&rman->lock);
-	} while (node == NULL);
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	spin_lock(&rman->lock);
+	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+					  mem->page_alignment,
+					  placement->fpfn, lpfn, true);
+	spin_unlock(&rman->lock);
+
+	if (unlikely(ret)) {
+		kfree(node);
+	} else {
+		mem->mm_node = node;
+		mem->start = node->start;
+	}
 
-	mem->mm_node = node;
-	mem->start = node->start;
 	return 0;
 }
 
@@ -93,8 +89,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 
 	if (mem->mm_node) {
 		spin_lock(&rman->lock);
-		drm_mm_put_block(mem->mm_node);
+		drm_mm_remove_node(mem->mm_node);
 		spin_unlock(&rman->lock);
+
+		kfree(mem->mm_node);
 		mem->mm_node = NULL;
 	}
 }