[12/13] drm/vmwgfx: switch the TTM backends to self alloc

Message ID 20210430092508.60710-12-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [01/13] drm/ttm: add ttm_sys_manager v2

Commit Message

Christian König April 30, 2021, 9:25 a.m. UTC
Similar to the TTM range manager.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 18 +++++----
 drivers/gpu/drm/vmwgfx/vmwgfx_thp.c           | 37 ++++++++++---------
 2 files changed, 31 insertions(+), 24 deletions(-)
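
The point of the conversion, as visible in the hunks below: each backend now
allocates an object that embeds (or is) a struct ttm_resource and initializes
it with the new ttm_resource_init() helper, instead of allocating a bare
bookkeeping node and leaving the resource itself to TTM core. A minimal
sketch of the converted shape (the dummy_* names are hypothetical, for
illustration only):

	struct dummy_mgr_node {
		struct ttm_resource base;	/* set up by ttm_resource_init() */
		/* backend-private bookkeeping would follow */
	};

	static int dummy_man_get_node(struct ttm_resource_manager *man,
				      struct ttm_buffer_object *bo,
				      const struct ttm_place *place,
				      struct ttm_resource *mem)
	{
		struct dummy_mgr_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;

		ttm_resource_init(bo, place, &node->base);
		mem->mm_node = node;	/* always set on success from now on */
		return 0;
	}

	static void dummy_man_put_node(struct ttm_resource_manager *man,
				       struct ttm_resource *mem)
	{
		kfree(mem->mm_node);	/* NULL check no longer needed */
	}

Because mm_node is now always set by a successful get_node(), the put_node
paths in both files below can drop their "if (mem->mm_node)" guards.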

Comments

Matthew Auld May 5, 2021, 4:49 p.m. UTC | #1
On Fri, 30 Apr 2021 at 10:25, Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Similar to the TTM range manager.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
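
For reference when reading the vmwgfx_thp.c hunks below: the node type
vmw_thp_get_node() switches to comes from the <drm/ttm/ttm_range_manager.h>
header added earlier in this series, and (paraphrased) it embeds the resource
followed by a flexible array of drm_mm nodes:

	struct ttm_range_mgr_node {
		struct ttm_resource base;
		struct drm_mm_node mm_nodes[];
	};

That layout is what the struct_size(node, mm_nodes, 1) allocation and the
&node->mm_nodes[0] accesses rely on; struct_size() (from <linux/overflow.h>)
evaluates to sizeof(*node) + 1 * sizeof(node->mm_nodes[0]), saturating rather
than wrapping on overflow.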

Patch

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 1774960d1b89..82a5e6489810 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,6 +57,12 @@  static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
+	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem->mm_node)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, mem->mm_node);
+
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return id;
@@ -87,13 +93,11 @@  static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	if (mem->mm_node) {
-		ida_free(&gman->gmr_ida, mem->start);
-		spin_lock(&gman->lock);
-		gman->used_gmr_pages -= mem->num_pages;
-		spin_unlock(&gman->lock);
-		mem->mm_node = NULL;
-	}
+	ida_free(&gman->gmr_ida, mem->start);
+	spin_lock(&gman->lock);
+	gman->used_gmr_pages -= mem->num_pages;
+	spin_unlock(&gman->lock);
+	kfree(mem->mm_node);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index 5ccc35b3194c..8765835696ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -7,6 +7,7 @@ 
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ -54,16 +55,18 @@  static int vmw_thp_get_node(struct ttm_resource_manager *man,
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
+	struct ttm_range_mgr_node *node;
 	unsigned long align_pages;
 	unsigned long lpfn;
 	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
 
+	ttm_resource_init(bo, place, &node->base);
+
 	lpfn = place->lpfn;
 	if (!lpfn)
 		lpfn = man->size;
@@ -76,8 +79,9 @@  static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
 		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-						     place, mem, lpfn, mode);
+			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+						     align_pages, place, mem,
+						     lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
@@ -85,14 +89,15 @@  static int vmw_thp_get_node(struct ttm_resource_manager *man,
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
 	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-					     mem, lpfn, mode);
+		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+					     align_pages, place, mem, lpfn,
+					     mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  bo->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  mem->num_pages, bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
@@ -100,8 +105,8 @@  static int vmw_thp_get_node(struct ttm_resource_manager *man,
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		mem->mm_node = &node->mm_nodes[0];
+		mem->start = node->mm_nodes[0].start;
 	}
 
 	return ret;
@@ -113,15 +118,13 @@  static void vmw_thp_put_node(struct ttm_resource_manager *man,
 			     struct ttm_resource *mem)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
+	struct ttm_range_mgr_node *node = mem->mm_node;
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
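
One caveat worth flagging in vmw_gmrid_man_get_node() above: with the new
allocation in place, the early return when ida_alloc_max() fails appears to
leave mem->mm_node allocated. An error path along these lines would release
it (a sketch against the code above, not part of the posted patch):

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0) {
		kfree(mem->mm_node);
		mem->mm_node = NULL;
		return id;
	}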