[2/2] drm/radeon: add 2-level VM pagetables support v4

Message ID 1347903379-12149-2-git-send-email-deathsimple@vodafone.de (mailing list archive)
State New, archived

Commit Message

Christian König Sept. 17, 2012, 5:36 p.m. UTC
From: Dmitry Cherkasov <dcherkassov@gmail.com>

PDE/PTE update code uses CP ring for memory writes.
All page table entries are preallocated for now in alloc_pt().

It is made as a whole because it's hard to divide it into several
patches that compile and don't break anything when applied separately.

Tested on a cayman card.

v2: rebased on top of "refactor set_page chipset interface v3",
    code cleanups

v3: switched offset calculation macros to inline funcs where possible,
    removed pd_addr from radeon_vm, changed the RADEON_VM_BLOCK_SIZE
    define to 9 (and PTE_COUNT to 1 << BLOCK_SIZE)

v4 (ck): move "incr" documentation to previous patch, cleanup and
         document RADEON_VM_* constants, change commit message to
         our usual format, simplify the patch a lot by removing
         everything not currently necessary, disable SI workaround.

Signed-off-by: Dmitry Cherkasov <Dmitrii.Cherkasov@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
---
 drivers/gpu/drm/radeon/ni.c          |    4 +-
 drivers/gpu/drm/radeon/radeon.h      |   12 +++++-
 drivers/gpu/drm/radeon/radeon_gart.c |   72 +++++++++++++++++++++++-----------
 drivers/gpu/drm/radeon/si.c          |    4 +-
 4 files changed, 63 insertions(+), 29 deletions(-)

Patch

diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b32e11b..88526c6 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -782,7 +782,7 @@  static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	cayman_pcie_gart_tlb_flush(rdev);
@@ -1580,7 +1580,7 @@  void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_write(ring, vm->last_pfn);
 
 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, vm->pt_gpu_addr >> 12);
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
 	/* flush hdp cache */
 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
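
PAGE_TABLE_DEPTH(1) tells the VM hardware to resolve context 1
addresses through one page directory level before reaching a page
table. A rough sketch of the resulting walk, assuming the RADEON_VM_*
constants added in radeon.h below; read_gpu_qword() is a hypothetical
helper and the exact PDE/PTE field layout is simplified:

    /* conceptual two-level walk, simplified -- not driver code */
    u64 pde  = read_gpu_qword(pd_base + (va >> 21) * 8);       /* 19-bit PD index */
    u64 pt   = pde & ~0xfffULL;                                /* PT base from PDE */
    u64 pte  = read_gpu_qword(pt + ((va >> 12) & 0x1ff) * 8);  /* 9-bit PT index */
    u64 phys = (pte & ~0xfffULL) | (va & 0xfff);               /* 12-bit page offset */
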
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d0d414d..519d8a3 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -648,15 +648,23 @@  struct radeon_ring {
  * VM
  */
 
+/* maximum number of VMIDs */
 #define RADEON_NUM_VM	16
 
+/* defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, 9 bits in the page
+ * table and the remaining 19 bits are in the page directory */
+#define RADEON_VM_BLOCK_SIZE   9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
+
 struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
 	unsigned			id;
 	unsigned			last_pfn;
-	u64				pt_gpu_addr;
-	u64				*pt;
+	u64				pd_gpu_addr;
 	struct radeon_sa_bo		*sa_bo;
 	struct mutex			mutex;
 	/* last fence for cs using this vm */
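
With 4KB pages and RADEON_VM_BLOCK_SIZE = 9, a virtual address thus
splits into a 12-bit page offset, a 9-bit page table index and a
19-bit page directory index. A minimal sketch of the index math; the
helper names are made up for illustration and do not exist in the
driver:

    /* illustrative helpers only */
    static inline unsigned example_pd_index(u64 va)
    {
    	return (va >> 12) >> RADEON_VM_BLOCK_SIZE;     /* bits 39:21 */
    }

    static inline unsigned example_pt_index(u64 va)
    {
    	return (va >> 12) & (RADEON_VM_PTE_COUNT - 1); /* bits 20:12 */
    }
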
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index bb9fc59..59d1fcf 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -490,7 +490,6 @@  static void radeon_vm_free_pt(struct radeon_device *rdev,
 
 	list_del_init(&vm->list);
 	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
-	vm->pt = NULL;
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
 		bo_va->valid = false;
@@ -531,6 +530,18 @@  void radeon_vm_manager_fini(struct radeon_device *rdev)
 }
 
 /**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+	return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+}
+
+/**
  * radeon_vm_alloc_pt - allocates a page table for a VM
  *
  * @rdev: radeon_device pointer
@@ -546,11 +557,16 @@  int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
 	struct radeon_vm *vm_evict;
 	int r;
+	u64 *pd_addr;
+	int tables_size;
 
 	if (vm == NULL) {
 		return -EINVAL;
 	}
 
+	tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
+
 	if (vm->sa_bo != NULL) {
 		/* update lru */
 		list_del_init(&vm->list);
@@ -560,8 +576,7 @@  int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 
 retry:
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-			     RADEON_GPU_PAGE_SIZE, false);
+			     tables_size, RADEON_GPU_PAGE_SIZE, false);
 	if (r == -ENOMEM) {
 		if (list_empty(&rdev->vm_manager.lru_vm)) {
 			return r;
@@ -576,9 +591,9 @@  retry:
 		return r;
 	}
 
-	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
-	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
-	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+	pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
+	memset(pd_addr, 0, tables_size);
 
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
 	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
@@ -866,8 +881,8 @@  int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	struct radeon_ring *ring = &rdev->ring[ridx];
 	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
-	unsigned ngpu_pages, ndw;
-	uint64_t pfn, addr;
+	unsigned nptes, npdes, ndw;
+	uint64_t pe, addr;
 	int r;
 
 	/* nothing to do if vm isn't bound */
@@ -889,10 +904,8 @@  int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
 		return 0;
 
-	ngpu_pages = radeon_bo_ngpu_pages(bo);
 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
-	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
 	if (mem) {
 		addr = mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {
@@ -922,8 +935,14 @@  int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 
 	/* estimate number of dw needed */
 	ndw = 32;
-	ndw += (ngpu_pages >> 12) * 3;
-	ndw += ngpu_pages * 2;
+
+	nptes = radeon_bo_ngpu_pages(bo);
+	ndw += (nptes >> 12) * 3;
+	ndw += nptes * 2;
+
+	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 1;
+	ndw += (npdes >> 12) * 3;
+	ndw += npdes * 2;
 
 	r = radeon_ring_lock(rdev, ring, ndw);
 	if (r) {
@@ -935,8 +954,22 @@  int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		radeon_fence_note_sync(vm->fence, ridx);
 	}
 
-	radeon_asic_vm_set_page(rdev, vm->pt_gpu_addr + pfn * 8, addr,
-				ngpu_pages, RADEON_GPU_PAGE_SIZE, bo_va->flags);
+	/* update page table entries */
+	pe = vm->pd_gpu_addr;
+	pe += radeon_vm_directory_size(rdev);
+	pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
+
+	radeon_asic_vm_set_page(rdev, pe, addr, nptes,
+				RADEON_GPU_PAGE_SIZE, bo_va->flags);
+
+	/* update page directory entries */
+	addr = pe;
+
+	pe = vm->pd_gpu_addr;
+	pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+
+	radeon_asic_vm_set_page(rdev, pe, addr, npdes,
+				RADEON_GPU_PAGE_SIZE, RADEON_VM_PAGE_VALID);
 
 	radeon_fence_unref(&vm->fence);
 	r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -1018,18 +1051,11 @@  int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	vm->id = 0;
 	vm->fence = NULL;
+	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-	/* SI requires equal sized PTs for all VMs, so always set
-	 * last_pfn to max_pfn.  cayman allows variable sized
-	 * pts so we can grow then as needed.  Once we switch
-	 * to two level pts we can unify this again.
-	 */
-	if (rdev->family >= CHIP_TAHITI)
-		vm->last_pfn = rdev->vm_manager.max_pfn;
-	else
-		vm->last_pfn = 0;
+
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */
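
The single SA allocation now holds the page directory followed by a
flat array of page tables, which is why radeon_vm_bo_update_pte()
above derives both write destinations from pd_gpu_addr alone. A
sketch of the layout and the offset math, reusing the patch's names
(illustrative, not driver code):

    /* pd_gpu_addr
     * +--------------------------+-----------------------------+
     * | page directory           | page tables (flat array)    |
     * | (max_pfn >> 9) * 8 bytes | last_pfn * 8 bytes          |
     * +--------------------------+-----------------------------+
     */
    u64 first_pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
    u64 pte_dst = vm->pd_gpu_addr + radeon_vm_directory_size(rdev)
    		+ first_pfn * 8;                           /* PTEs for this BO */
    u64 pde_dst = vm->pd_gpu_addr
    		+ (first_pfn >> RADEON_VM_BLOCK_SIZE) * 8; /* matching PDEs */
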
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2a5c337..156c994 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2426,7 +2426,7 @@  static int si_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 0);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	si_pcie_gart_tlb_flush(rdev);
@@ -2804,7 +2804,7 @@  void si_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
 		radeon_ring_write(ring, PACKET0(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR
 						+ ((vm->id - 8) << 2), 0));
 	}
-	radeon_ring_write(ring, vm->pt_gpu_addr >> 12);
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
 	/* flush hdp cache */
 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
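
For a feel of the ring-space estimate in radeon_vm_bo_update_pte(),
here is the arithmetic worked through for a hypothetical 1MB buffer:

    /* worked example of the ndw estimate, numbers illustrative only:
     * nptes = 1MB / 4KB = 256
     * ndw   = 32 + (256 >> 12) * 3 + 256 * 2 = 544
     * npdes = (256 >> 9) + 1 = 1
     * ndw  += (1 >> 12) * 3 + 1 * 2, for 546 dwords reserved
     */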