[7/7] drm/radeon: don't allocate PD/PT BO list any more

Message ID: 1417096126-14859-7-git-send-email-deathsimple@vodafone.de

Commit Message

Christian König Nov. 27, 2014, 1:48 p.m. UTC
From: Christian König <christian.koenig@amd.com>

Embed the BO list entry directly in the page directory and page table
structures instead of allocating a temporary array of entries for every
command submission. Adding the VM BOs to a validation list then becomes
a simple list_add() with no allocation that can fail.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon.h     | 12 +++---
 drivers/gpu/drm/radeon/radeon_cs.c  |  4 +-
 drivers/gpu/drm/radeon/radeon_gem.c | 11 ++----
 drivers/gpu/drm/radeon/radeon_vm.c  | 75 ++++++++++++++++---------------------
 4 files changed, 41 insertions(+), 61 deletions(-)
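
The core of the change is to embed the validation-list entry in the VM
bookkeeping itself instead of building a throwaway array of struct
radeon_bo_list entries on every submission. Below is a minimal,
self-contained sketch of that intrusive-list pattern; the names (struct
bo, struct vm_pt, the list helpers) are invented for illustration and
stand in for the radeon structures and <linux/list.h>:

#include <stdio.h>
#include <stddef.h>

/* Tiny doubly linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bo { const char *name; };

/* Hypothetical page table entry with the list node embedded, as the
 * patch does for struct radeon_vm_pt: the node lives exactly as long
 * as the page table itself. */
struct vm_pt {
	struct bo	*robj;	/* NULL while the page table is unallocated */
	struct list_head head;	/* embedded validation-list node */
};

int main(void)
{
	struct bo pd = { "page directory" }, pt0 = { "page table 0" };
	struct vm_pt tables[3] = { { &pd }, { &pt0 }, { NULL } };
	struct list_head validated, *pos;
	unsigned i;

	list_init(&validated);

	/* Mirrors the new radeon_vm_add_bos(): adding the BOs is just
	 * list_add(), no allocation, no failure path. */
	for (i = 0; i < 3; i++) {
		if (!tables[i].robj)
			continue;
		list_add(&tables[i].head, &validated);
	}

	for (pos = validated.next; pos != &validated; pos = pos->next)
		printf("validate %s\n",
		       container_of(pos, struct vm_pt, head)->robj->name);
	return 0;
}

With the entry embedded, adding the BOs can no longer fail, which is why
radeon_vm_add_bos() returns void and radeon_gem_va_update_vm() below
loses its early return for the allocation-failure case.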

Patch

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 699446a..d652ccb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -911,8 +911,8 @@  struct radeon_mec {
 				  R600_PTE_SYSTEM | R600_PTE_VALID )
 
 struct radeon_vm_pt {
-	struct radeon_bo		*bo;
-	uint64_t			addr;
+	struct radeon_bo_list	list;
+	uint64_t		addr;
 };
 
 struct radeon_vm_id {
@@ -939,7 +939,7 @@  struct radeon_vm {
 	struct list_head	freed;
 
 	/* contains the page directory */
-	struct radeon_bo	*page_directory;
+	struct radeon_bo_list	page_directory;
 	unsigned		max_pde_used;
 
 	/* array of page tables, one for each page directory entry */
@@ -1077,7 +1077,6 @@  struct radeon_cs_parser {
 	unsigned		nrelocs;
 	struct radeon_bo_list	*relocs;
 	struct radeon_bo_list	**relocs_ptr;
-	struct radeon_bo_list	*vm_bos;
 	struct list_head	validated;
 	unsigned		dma_reloc_idx;
 	/* indices of various chunks */
@@ -2976,9 +2975,8 @@  int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
-					  struct radeon_vm *vm,
-                                          struct list_head *head);
+void radeon_vm_add_bos(struct radeon_device *rdev, struct radeon_vm *vm,
+		       struct list_head *head);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 				       struct radeon_vm *vm, int ring);
 void radeon_vm_flush(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7a90378..551980c 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,8 +193,7 @@  static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	radeon_cs_buckets_get_list(&buckets, &p->validated);
 
 	if (p->cs_flags & RADEON_CS_USE_VM)
-		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
-					      &p->validated);
+		radeon_vm_add_bos(p->rdev, p->ib.vm, &p->validated);
 
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
@@ -450,7 +449,6 @@  static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 	kfree(parser->track);
 	kfree(parser->relocs);
 	kfree(parser->relocs_ptr);
-	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 4eafec6..26d1af1 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -554,7 +554,6 @@  static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 				    struct radeon_bo_va *bo_va)
 {
 	struct ttm_validate_buffer tv, *entry;
-	struct radeon_bo_list *vm_bos;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	unsigned domain;
@@ -566,13 +565,11 @@  static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
-	if (!vm_bos)
-		return;
+	radeon_vm_add_bos(rdev, bo_va->vm, &list);
 
 	r = ttm_eu_reserve_buffers(&ticket, &list, true);
 	if (r)
-		goto error_free;
+		goto error_done;
 
 	list_for_each_entry(entry, &list, head) {
 		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
@@ -592,9 +589,7 @@  static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
 
-error_free:
-	drm_free_large(vm_bos);
-
+error_done:
 	if (r)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 658183f..b09c998 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -117,7 +117,7 @@  void radeon_vm_manager_fini(struct radeon_device *rdev)
 }
 
 /**
- * radeon_vm_get_bos - add the vm BOs to a validation list
+ * radeon_vm_add_bos - add the PD/PT BOs to a validation list
  *
  * @vm: vm providing the BOs
  * @head: head of validation list
@@ -125,41 +125,18 @@  void radeon_vm_manager_fini(struct radeon_device *rdev)
- * Add the page directory to the list of BOs to
- * validate for command submission (cayman+).
+ * Add the page directory and page tables to the
+ * list of BOs to validate for command submission (cayman+).
  */
-struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
-					  struct radeon_vm *vm,
-					  struct list_head *head)
+void radeon_vm_add_bos(struct radeon_device *rdev, struct radeon_vm *vm,
+		       struct list_head *head)
 {
-	struct radeon_bo_list *list;
-	unsigned i, idx;
+	unsigned i;
 
-	list = drm_malloc_ab(vm->max_pde_used + 2,
-			     sizeof(struct radeon_bo_list));
-	if (!list)
-		return NULL;
-
-	/* add the vm page table to the list */
-	list[0].robj = vm->page_directory;
-	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
-	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
-	list[0].tv.bo = &vm->page_directory->tbo;
-	list[0].tv.shared = true;
-	list[0].tiling_flags = 0;
-	list_add(&list[0].tv.head, head);
-
+	list_add(&vm->page_directory.tv.head, head);
-	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
+	for (i = 0; i <= vm->max_pde_used; i++) {
-		if (!vm->page_tables[i].bo)
+		if (!vm->page_tables[i].list.robj)
 			continue;
 
-		list[idx].robj = vm->page_tables[i].bo;
-		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
-		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
-		list[idx].tv.bo = &list[idx].robj->tbo;
-		list[idx].tv.shared = true;
-		list[idx].tiling_flags = 0;
-		list_add(&list[idx++].tv.head, head);
+		list_add(&vm->page_tables[i].list.tv.head, head);
 	}
-
-	return list;
 }
 
 /**
@@ -237,7 +214,7 @@  void radeon_vm_flush(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     int ring, struct radeon_fence *updates)
 {
-	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+	uint64_t pd_addr = vm->page_directory.gpu_offset;
 	struct radeon_vm_id *vm_id = &vm->ids[ring];
 
 	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
@@ -528,9 +505,10 @@  int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
+		struct radeon_vm_pt *entry = &vm->page_tables[pt_idx];
 		struct radeon_bo *pt;
 
-		if (vm->page_tables[pt_idx].bo)
+		if (entry->list.robj)
 			continue;
 
 		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
@@ -547,8 +525,13 @@  int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 			return r;
 		}
 
-		vm->page_tables[pt_idx].addr = 0;
-		vm->page_tables[pt_idx].bo = pt;
+		entry->list.robj = pt;
+		entry->list.prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+		entry->list.allowed_domains = RADEON_GEM_DOMAIN_VRAM;
+		entry->list.tv.bo = &pt->tbo;
+		entry->list.tv.shared = true;
+		entry->list.tiling_flags = 0;
+		entry->addr = 0;
 	}
 
 	return 0;
@@ -614,7 +597,7 @@  static uint32_t radeon_vm_page_flags(uint32_t flags)
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
 				    struct radeon_vm *vm)
 {
-	struct radeon_bo *pd = vm->page_directory;
+	struct radeon_bo *pd = vm->page_directory.robj;
 	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
 	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
@@ -639,7 +622,7 @@  int radeon_vm_update_page_directory(struct radeon_device *rdev,
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
+		struct radeon_bo *bo = vm->page_tables[pt_idx].list.robj;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
@@ -793,7 +776,7 @@  static int radeon_vm_update_ptes(struct radeon_device *rdev,
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; ) {
 		uint64_t pt_idx = addr >> radeon_vm_block_size;
-		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
+		struct radeon_bo *pt = vm->page_tables[pt_idx].list.robj;
 		unsigned nptes;
 		uint64_t pte;
 		int r;
@@ -860,7 +843,7 @@  static void radeon_vm_fence_pts(struct radeon_vm *vm,
 	end >>= radeon_vm_block_size;
 
 	for (i = start; i <= end; ++i)
-		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
+		radeon_bo_fence(vm->page_tables[i].list.robj, fence, true);
 }
 
 /**
@@ -1162,17 +1145,23 @@  int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	r = radeon_bo_create(rdev, pd_size, align, true,
 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
-			     NULL, &vm->page_directory);
+			     NULL, &vm->page_directory.robj);
 	if (r)
 		return r;
 
-	r = radeon_vm_clear_bo(rdev, vm->page_directory);
+	r = radeon_vm_clear_bo(rdev, vm->page_directory.robj);
 	if (r) {
-		radeon_bo_unref(&vm->page_directory);
-		vm->page_directory = NULL;
+		radeon_bo_unref(&vm->page_directory.robj);
+		vm->page_directory.robj = NULL;
 		return r;
 	}
 
+	vm->page_directory.prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+	vm->page_directory.allowed_domains = RADEON_GEM_DOMAIN_VRAM;
+	vm->page_directory.tv.bo = &vm->page_directory.robj->tbo;
+	vm->page_directory.tv.shared = true;
+	vm->page_directory.tiling_flags = 0;
+
 	return 0;
 }
 
@@ -1210,10 +1199,10 @@  void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	}
 
 	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
-		radeon_bo_unref(&vm->page_tables[i].bo);
+		radeon_bo_unref(&vm->page_tables[i].list.robj);
 	kfree(vm->page_tables);
 
-	radeon_bo_unref(&vm->page_directory);
+	radeon_bo_unref(&vm->page_directory.robj);
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		radeon_fence_unref(&vm->ids[i].flushed_updates);