[08/13] drm/amdgpu: use RB tree instead of linked list

Message ID 20180509064543.15937-9-david1.zhou@amd.com (mailing list archive)
State New, archived

Commit Message

Chunming Zhou May 9, 2018, 6:45 a.m. UTC
Change-Id: Iaca5cdaccbc5beeb7a37c0f703cdfc97df4ece4f
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     | 87 +++++++++++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  3 +-
 4 files changed, 84 insertions(+), 9 deletions(-)
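
The patch swaps the per-VM "fixed" LRU lists for rb-trees keyed by a new per-BO index field, so eviction walks buffers in index order and removal becomes an O(log n) lookup plus rb_erase(). For reference, here is a minimal sketch of the <linux/rbtree.h> keyed insert/lookup pattern it relies on; struct demo_node and the demo_* helpers are hypothetical stand-ins, the patch applies the same walk to struct amdgpu_bo keyed on its new index member.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node node;
	u64 index;
};

/* Walk down from the root, remembering the parent and the link to rewire. */
static void demo_insert(struct rb_root *root, struct demo_node *dn)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct demo_node *this = rb_entry(*link, struct demo_node, node);

		parent = *link;
		if (dn->index < this->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&dn->node, parent, link);	/* attach as a leaf */
	rb_insert_color(&dn->node, root);	/* rebalance and recolor */
}

/* O(log n) lookup by key; returns NULL if no node carries this index. */
static struct demo_node *demo_find(struct rb_root *root, u64 index)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct demo_node *dn = rb_entry(node, struct demo_node, node);

		if (index < dn->index)
			node = node->rb_left;
		else if (index > dn->index)
			node = node->rb_right;
		else
			return dn;
	}
	return NULL;
}

As in the patch, equal keys fall to the right of an existing node, so duplicate indices are accepted on insert; a later lookup by index then returns whichever duplicate it reaches first.
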
Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index f04fc401327b..b6396230d30e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -82,6 +82,8 @@  struct amdgpu_bo {
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
 	struct ttm_bo_kmap_obj		kmap;
+	struct rb_node			node;
+	u64				index;
 	u64				flags;
 	unsigned			pin_count;
 	u64				tiling_flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 207f88f38b23..a5d8f511b011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1279,6 +1279,7 @@  static struct ttm_bo_driver amdgpu_bo_driver = {
 	.invalidate_caches = &amdgpu_invalidate_caches,
 	.init_mem_type = &amdgpu_init_mem_type,
 	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
+	.lru_empty = &amdgpu_vm_lru_empty,
 	.get_evictable_bo = &amdgpu_vm_get_evictable_bo,
 	.add_to_lru = &amdgpu_vm_add_to_lru,
 	.del_from_lru = &amdgpu_vm_del_from_lru,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 27b3fdb6dd46..1a09c07bbf20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -133,7 +133,7 @@  int amdgpu_vm_lru_init(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&vm_lru->vm_lru_list);
 	for (i = 0; i < TTM_NUM_MEM_TYPES; i++) {
 		for (j = 0; j < TTM_MAX_BO_PRIORITY; j++) {
-			INIT_LIST_HEAD(&vm_lru->fixed_lru[i][j]);
+			vm_lru->fixed_lru[i][j] = RB_ROOT;
 			INIT_LIST_HEAD(&vm_lru->dynamic_lru[i][j]);
 		}
 	}
@@ -157,6 +157,24 @@  int amdgpu_vm_lru_fini(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev)
 	return 0;
 }
 
+bool amdgpu_vm_lru_empty(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+	struct amdgpu_vm_lru *vm_lru;
+	int i;
+
+	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+		list_for_each_entry(vm_lru, &adev->vm_lru_list, vm_lru_list) {
+			if (!list_empty(&vm_lru->dynamic_lru[mem_type][i]))
+				return false;
+			if (!RB_EMPTY_ROOT(&vm_lru->fixed_lru[mem_type][i]))
+				return false;
+		}
+	}
+
+	return true;
+}
+
 struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 						     uint32_t mem_type,
 						     const struct ttm_place *place,
@@ -165,11 +183,13 @@  struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct ttm_buffer_object *bo = NULL;
+	struct amdgpu_bo *abo = NULL;
 	struct amdgpu_vm_lru *vm_lru;
 	int i;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		list_for_each_entry(vm_lru, &adev->vm_lru_list, vm_lru_list) {
+			struct rb_node *node;
 			list_for_each_entry(bo, &vm_lru->dynamic_lru[mem_type][i], lru) {
 				if (!ttm_bo_evict_swapout_allowable(bo, ctx, locked))
 					continue;
@@ -184,20 +204,24 @@  struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 			if (&bo->lru != &vm_lru->dynamic_lru[mem_type][i])
 				break;
 			bo = NULL;
-			list_for_each_entry(bo, &vm_lru->fixed_lru[mem_type][i], lru) {
-				if (!ttm_bo_evict_swapout_allowable(bo, ctx, locked))
+			for (node = rb_first(&vm_lru->fixed_lru[mem_type][i]);
+			     node; node = rb_next(node)) {
+				abo = rb_entry(node, struct amdgpu_bo, node);
+				bo = &abo->tbo;
+				if (!ttm_bo_evict_swapout_allowable(bo, ctx, locked)) {
+					bo = NULL;
 					continue;
+				}
 				if (place && !bdev->driver->eviction_valuable(bo, place)) {
 					if (locked)
 						reservation_object_unlock(bo->resv);
+					bo = NULL;
 					continue;
 				}
 				break;
 			}
+			if (bo)
+				break;
-			/* If the inner loop terminated early, we have our candidate */
-			if (&bo->lru != &vm_lru->fixed_lru[mem_type][i])
-				break;
-			bo = NULL;
 		}
 		if (bo)
 			break;
@@ -207,6 +231,26 @@  struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 
 }
 
+static void amdgpu_vm_bo_add_to_rb(struct amdgpu_bo *bo,
+				   struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	while (*new) {
+		struct amdgpu_bo *this =
+			container_of(*new, struct amdgpu_bo, node);
+
+		parent = *new;
+		if (bo->index < this->index)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&bo->node, parent, new);
+	rb_insert_color(&bo->node, root);
+}
+
 void amdgpu_vm_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -215,7 +259,7 @@  void amdgpu_vm_add_to_lru(struct ttm_buffer_object *bo)
 
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 		if (bo->resv == vm_lru->resv)
-			list_add_tail(&bo->lru, &vm_lru->fixed_lru[bo->mem.mem_type][bo->priority]);
+			amdgpu_vm_bo_add_to_rb(abo, &vm_lru->fixed_lru[bo->mem.mem_type][bo->priority]);
 		else
 			list_add_tail(&bo->lru, &vm_lru->dynamic_lru[bo->mem.mem_type][bo->priority]);
 		kref_get(&bo->list_kref);
@@ -230,9 +274,36 @@  void amdgpu_vm_add_to_lru(struct ttm_buffer_object *bo)
 
 }
 
+static struct amdgpu_bo *amdgpu_vm_bo_rb_find(struct rb_root *root, u64 index)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct amdgpu_bo *bo =
+			container_of(node, struct amdgpu_bo, node);
+
+		if (index < bo->index)
+			node = node->rb_left;
+		else if (index > bo->index)
+			node = node->rb_right;
+		else
+			return bo;
+	}
+
+	return NULL;
+}
+
 void amdgpu_vm_del_from_lru(struct ttm_buffer_object *bo)
 {
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+	struct amdgpu_vm_lru *vm_lru = abo->vm_lru;
 
+	if (amdgpu_vm_bo_rb_find(&vm_lru->fixed_lru[bo->mem.mem_type][bo->priority],
+				 abo->index)) {
+		rb_erase(&abo->node,
+			 &vm_lru->fixed_lru[abo->tbo.mem.mem_type][abo->tbo.priority]);
+		kref_put(&abo->tbo.list_kref, ttm_bo_ref_bug);
+	}
 }
 
 void amdgpu_vm_move_to_lru_tail(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 66ee902614a2..84400673d710 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -138,7 +138,7 @@  enum amdgpu_vm_level {
 
 struct amdgpu_vm_lru {
 	struct list_head vm_lru_list;
-	struct list_head fixed_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+	struct rb_root fixed_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
 	struct list_head dynamic_lru[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
 	struct reservation_object *resv;
 };
@@ -269,6 +269,7 @@  int amdgpu_vm_lru_init(struct amdgpu_vm_lru *vm_lru, struct amdgpu_device *adev,
 		       struct reservation_object *resv);
 int amdgpu_vm_lru_fini(struct amdgpu_vm_lru *vm_lru,
 		       struct amdgpu_device *adev);
+bool amdgpu_vm_lru_empty(struct ttm_bo_device *bdev, unsigned mem_type);
 
 struct ttm_buffer_object *amdgpu_vm_get_evictable_bo(struct ttm_bo_device *bdev,
 						     uint32_t mem_type,
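
The eviction and removal hunks above drive the new tree with the stock rbtree iterators: rb_first()/rb_next() for an in-order scan and rb_erase() to drop a node. Below is a minimal sketch of that usage, reusing the hypothetical demo_node type from the sketch before the patch; should_skip() stands in for the driver's eviction checks and is not a real API.

/* Visit candidates in ascending index order, as the eviction loop does. */
static struct demo_node *demo_first_candidate(struct rb_root *root,
					      bool (*should_skip)(struct demo_node *))
{
	struct rb_node *it;

	for (it = rb_first(root); it; it = rb_next(it)) {
		struct demo_node *dn = rb_entry(it, struct demo_node, node);

		if (!should_skip(dn))
			return dn;	/* first acceptable candidate wins */
	}
	return NULL;
}

/* Drop a node from the tree once the BO stops being tracked there. */
static void demo_remove(struct rb_root *root, struct demo_node *dn)
{
	rb_erase(&dn->node, root);
	RB_CLEAR_NODE(&dn->node);	/* mark it as no longer inserted */
}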