[56/68] drm/i915/bdw: Abstract PDP usage

Message ID: 1408677155-1840-57-git-send-email-benjamin.widawsky@intel.com
State: New, archived

Commit Message

Ben Widawsky Aug. 22, 2014, 3:12 a.m. UTC
Up until now, ppgtt->pdp has always been the root of our page tables.
Legacy 32b address spaces acted as though there were a single PDP with 4
PDPEs.

In preparation for 4-level page tables, we need to stop using ppgtt->pdp
directly unless we know it's what we want. The future structure will use
ppgtt->pml4 for the top level, with each pdp being just one of the
entries pointed to by a pml4e.
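
For reference, a rough sketch of the two root layouts (the struct names
match the ones this patch touches, but the fields shown are illustrative,
not the full driver definitions):

	/* Legacy 32b: the PDP is the root; 4 PDPEs cover the
	 * whole address space. */
	struct i915_pagedirpo {
		struct i915_pagedir **pagedirs;	/* one per PDPE */
		unsigned long *used_pdpes;	/* allocation bitmap */
	};

	/* 48b: a PML4 becomes the root, and each PML4E points at a
	 * PDP like the one above (field layout assumed here). */
	struct i915_pml4 {
		struct i915_pagedirpo *pdps[512];	/* one per PML4E */
	};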

This patch addresses some carelessness that crept in during development
with respect to assumptions made about the root page tables.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 160 ++++++++++++++++++++----------------
 1 file changed, 88 insertions(+), 72 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8e15842..7cc6cf9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -491,6 +491,7 @@  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
 	unsigned pdpe = gen8_pdpe_index(start);
 	unsigned pde = gen8_pde_index(start);
@@ -502,7 +503,7 @@  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
+		struct i915_pagedir *pd = pdp->pagedirs[pdpe];
 		struct i915_pagetab *pt = pd->page_tables[pde];
 		struct page *page_table = pt->page;
 
@@ -536,6 +537,7 @@  static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
+	struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	gen8_gtt_pte_t *pt_vaddr;
 	unsigned pdpe = gen8_pdpe_index(start);
 	unsigned pde = gen8_pde_index(start);
@@ -546,7 +548,7 @@  static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL) {
-			struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
+			struct i915_pagedir *pd = pdp->pagedirs[pdpe];
 			struct i915_pagetab *pt = pd->page_tables[pde];
 			struct page *page_table = pt->page;
 			pt_vaddr = kmap_atomic(page_table);
@@ -604,24 +606,26 @@  static void gen8_map_pagetable_range(struct i915_pagedir *pd,
 	kunmap_atomic(pagedir);
 }
 
-static void __gen8_teardown_va_range(struct i915_address_space *vm,
-				     uint64_t start, uint64_t length,
-				     bool dead)
+static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
+					struct i915_pagedirpo *pdp,
+					uint64_t start, uint64_t length,
+					bool dead)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		        container_of(vm, struct i915_hw_ppgtt, base);
 	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
 	struct i915_pagetab *pt;
 	uint64_t temp;
 	uint32_t pdpe, pde;
 
-	if (!ppgtt->pdp.pagedirs) {
+	BUG_ON(!pdp);
+	if (!pdp->pagedirs) {
+		WARN(!bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)),
+		     "Page directory leak detected\n");
 		/* If pagedirs are already free, there is nothing to do.*/
 		return;
 	}
 
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
 
@@ -629,19 +633,19 @@  static void __gen8_teardown_va_range(struct i915_address_space *vm,
 		 * down, and up.
 		 */
 		if (!pd) {
-			WARN(test_bit(pdpe, ppgtt->pdp.used_pdpes),
+			WARN(test_bit(pdpe, pdp->used_pdpes),
 			     "PDPE %d is not allocated, but is reserved (%p)\n",
 			     pdpe, vm);
 			continue;
 		} else {
 			if (dead && pd->zombie) {
-				WARN_ON(test_bit(pdpe, ppgtt->pdp.used_pdpes));
+				WARN_ON(test_bit(pdpe, pdp->used_pdpes));
 				free_pd_single(pd, vm->dev);
-				ppgtt->pdp.pagedirs[pdpe] = NULL;
+				pdp->pagedirs[pdpe] = NULL;
 				continue;
 			}
 
-			WARN(!test_bit(pdpe, ppgtt->pdp.used_pdpes),
+			WARN(!test_bit(pdpe, pdp->used_pdpes),
 			     "PDPE %d not reserved, but is allocated (%p)",
 			     pdpe, vm);
 		}
@@ -683,7 +687,7 @@  static void __gen8_teardown_va_range(struct i915_address_space *vm,
 		gen8_ppgtt_clear_range(vm, pd_start, pd_len, true);
 
 		if (bitmap_empty(pd->used_pdes, I915_PDES_PER_PD)) {
-			WARN_ON(!test_and_clear_bit(pdpe, ppgtt->pdp.used_pdpes));
+			WARN_ON(!test_and_clear_bit(pdpe, pdp->used_pdpes));
 			if (!dead) {
 				/* We've unmapped a possibly live context. Make
 				 * note of it so we can clean it up later. */
@@ -691,20 +695,32 @@  static void __gen8_teardown_va_range(struct i915_address_space *vm,
 				continue;
 			}
 			free_pd_single(pd, dev);
-			ppgtt->pdp.pagedirs[pdpe] = NULL;
+			pdp->pagedirs[pdpe] = NULL;
 		}
 	}
 
-	if (bitmap_empty(ppgtt->pdp.used_pdpes, I915_PDPES_PER_PDP(dev))) {
-		/* TODO: When pagetables are fully dynamic:
-		free_pdp_single(&ppgtt->pdp, dev); */
-	}
+	if (dead && bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)))
+		free_pdp_single(pdp, dev);
+}
+
+static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
+					struct i915_pml4 *pml4,
+					uint64_t start, uint64_t length,
+					bool dead)
+{
+	BUG();
 }
 
 static void gen8_teardown_va_range(struct i915_address_space *vm,
 				   uint64_t start, uint64_t length)
 {
-	__gen8_teardown_va_range(vm, start, length, false);
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (!HAS_48B_PPGTT(vm->dev))
+		gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length, false);
+	else
+		gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length, false);
 }
 
 static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -712,12 +728,10 @@  static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 	trace_i915_va_teardown(&ppgtt->base,
 			       ppgtt->base.start, ppgtt->base.total,
 			       VM_TO_TRACE_NAME(&ppgtt->base));
-	__gen8_teardown_va_range(&ppgtt->base,
-				 ppgtt->base.start, ppgtt->base.total,
-				 true);
-	WARN_ON(!bitmap_empty(ppgtt->pdp.used_pdpes,
-			      I915_PDPES_PER_PDP(ppgtt->base.dev)));
-	free_pdp_single(&ppgtt->pdp, ppgtt->base.dev);
+	gen8_teardown_va_range_3lvl(&ppgtt->base, &ppgtt->pdp,
+				    ppgtt->base.start, ppgtt->base.total,
+				    true);
+	BUG_ON(ppgtt->pdp.pagedirs); /* FIXME: 48b */
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -733,7 +747,7 @@  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 
 /**
  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @ppgtt:	Master ppgtt structure.
+ * @vm:		Master vm structure.
  * @pd:		Page directory for this address range.
  * @start:	Starting virtual address to begin allocations.
  * @length	Size of the allocations.
@@ -749,12 +763,13 @@  static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 				     struct i915_pagedir *pd,
 				     uint64_t start,
 				     uint64_t length,
 				     unsigned long *new_pts)
 {
+	struct drm_device *dev = vm->dev;
 	struct i915_pagetab *pt;
 	uint64_t temp;
 	uint32_t pde;
@@ -772,7 +787,7 @@  static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 			continue;
 		}
 
-		pt = alloc_pt_single(ppgtt->base.dev);
+		pt = alloc_pt_single(dev);
 		if (IS_ERR(pt))
 			goto unwind_out;
 
@@ -784,14 +799,14 @@  static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	for_each_set_bit(pde, new_pts, I915_PDES_PER_PD)
-		free_pt_single(pd->page_tables[pde], ppgtt->base.dev);
+		free_pt_single(pd->page_tables[pde], dev);
 
 	return -ENOMEM;
 }
 
 /**
  * gen8_ppgtt_alloc_pagedirs() - Allocate page directories for VA range.
- * @ppgtt:	Master ppgtt structure.
+ * @vm:		Master vm structure.
  * @pdp:	Page directory pointer for this address range.
  * @start:	Starting virtual address to begin allocations.
  * @length	Size of the allocations.
@@ -812,17 +827,17 @@  unwind_out:
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagedirs(struct i915_address_space *vm,
 				     struct i915_pagedirpo *pdp,
 				     uint64_t start,
 				     uint64_t length,
 				     unsigned long *new_pds)
 {
-	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
 	uint64_t temp;
 	uint32_t pdpe;
-	size_t pdpes =  I915_PDPES_PER_PDP(ppgtt->base.dev);
+	size_t pdpes =  I915_PDPES_PER_PDP(vm->dev);
 
 	BUG_ON(!bitmap_empty(new_pds, pdpes));
 
@@ -833,7 +848,7 @@  static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
 		if (pd)
 			continue;
 
-		pd = alloc_pd_single(ppgtt->base.dev);
+		pd = alloc_pd_single(dev);
 		if (IS_ERR(pd))
 			goto unwind_out;
 
@@ -845,7 +860,7 @@  static int gen8_ppgtt_alloc_pagedirs(struct i915_hw_ppgtt *ppgtt,
 
 unwind_out:
 	for_each_set_bit(pdpe, new_pds, pdpes)
-		free_pd_single(pdp->pagedirs[pdpe], ppgtt->base.dev);
+		free_pd_single(pdp->pagedirs[pdpe], dev);
 
 	return -ENOMEM;
 }
@@ -899,12 +914,11 @@  err_out:
 	return -ENOMEM;
 }
 
-static int gen8_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start,
-			       uint64_t length)
+static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
+				    struct i915_pagedirpo *pdp,
+				    uint64_t start,
+				    uint64_t length)
 {
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
 	unsigned long *new_page_dirs, **new_page_tables;
 	struct drm_device *dev = vm->dev;
 	struct i915_pagedir *pd;
@@ -934,18 +948,15 @@  static int gen8_alloc_va_range(struct i915_address_space *vm,
 		return ret;
 
 	/* Do the allocations first so we can easily bail out */
-	ret = gen8_ppgtt_alloc_pagedirs(ppgtt, &ppgtt->pdp, start, length,
-					new_page_dirs);
+	ret = gen8_ppgtt_alloc_pagedirs(vm, pdp, start, length, new_page_dirs);
 	if (ret) {
 		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 		return ret;
 	}
 
-	/* For every page directory referenced, allocate page tables */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		bitmap_zero(new_page_tables[pdpe], I915_PDES_PER_PD);
-		ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
-						new_page_tables[pdpe]);
+		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, new_page_tables[pdpe]);
 		if (ret)
 			goto err_out;
 	}
@@ -953,10 +964,7 @@  static int gen8_alloc_va_range(struct i915_address_space *vm,
 	start = orig_start;
 	length = orig_length;
 
-	/* Allocations have completed successfully, so set the bitmaps, and do
-	 * the mappings. */
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_ppgtt_pde_t *const pagedir = kmap_atomic(pd->page);
+	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		struct i915_pagetab *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -978,25 +986,12 @@  static int gen8_alloc_va_range(struct i915_address_space *vm,
 
 			/* Our pde is now pointing to the pagetable, pt */
 			set_bit(pde, pd->used_pdes);
-
-			/* Map the PDE to the page table */
-			__gen8_do_map_pt(pagedir + pde, pt, vm->dev);
-
-			/* NB: We haven't yet mapped ptes to pages. At this
-			 * point we're still relying on insert_entries() */
-
-			/* No longer possible this page table is a zombie */
 			pt->zombie = 0;
 		}
 
-		if (!HAS_LLC(vm->dev))
-			drm_clflush_virt_range(pagedir, PAGE_SIZE);
-
-		kunmap_atomic(pagedir);
-
-		set_bit(pdpe, ppgtt->pdp.used_pdpes);
-		/* This pd is officially not a zombie either */
-		ppgtt->pdp.pagedirs[pdpe]->zombie = 0;
+		set_bit(pdpe, pdp->used_pdpes);
+		gen8_map_pagetable_range(pd, start, length, dev);
+		pd->zombie = 0;
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
@@ -1005,16 +1000,36 @@  static int gen8_alloc_va_range(struct i915_address_space *vm,
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES_PER_PD)
-			free_pt_single(pd->page_tables[temp], vm->dev);
+			free_pt_single(pd->page_tables[temp], dev);
 	}
 
 	for_each_set_bit(pdpe, new_page_dirs, pdpes)
-		free_pd_single(ppgtt->pdp.pagedirs[pdpe], vm->dev);
+		free_pd_single(pdp->pagedirs[pdpe], dev);
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables, pdpes);
 	return ret;
 }
 
+static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+					       struct i915_pml4 *pml4,
+					       uint64_t start,
+					       uint64_t length)
+{
+	BUG();
+}
+
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+			       uint64_t start, uint64_t length)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+
+	if (!HAS_48B_PPGTT(vm->dev))
+		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+	else
+		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+}
+
 static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
 {
 	free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1060,12 +1075,13 @@  static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_pagedirpo *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	struct i915_pagedir *pd;
 	uint64_t temp, start = 0, size = dev_priv->gtt.base.total;
 	uint32_t pdpe;
 	int ret;
 
-	ret = gen8_ppgtt_init_common(ppgtt, dev_priv->gtt.base.total);
+	ret = gen8_ppgtt_init_common(ppgtt, size);
 	if (ret)
 		return ret;
 
@@ -1078,8 +1094,8 @@  static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 		return ret;
 	}
 
-	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, size, temp, pdpe)
-		gen8_map_pagetable_range(pd, start, size, ppgtt->base.dev);
+	gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
+		gen8_map_pagetable_range(pd, start, size, dev);
 
 	ppgtt->base.allocate_va_range = NULL;
 	ppgtt->base.teardown_va_range = NULL;
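
The _4lvl entry points introduced above are BUG() stubs until 48b support
actually lands. Presumably they will walk the PML4 range and reuse the
_3lvl helpers on each PDP. A rough sketch of how the teardown side might
eventually look (gen8_for_each_pml4e() and the pdps[] field do not exist
in this patch and are assumptions):

	static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
						struct i915_pml4 *pml4,
						uint64_t start, uint64_t length,
						bool dead)
	{
		/* Hypothetical: visit each PDP covered by the VA range and
		 * hand it to the 3lvl helper. */
		struct i915_pagedirpo *pdp;
		uint64_t temp;
		uint32_t pml4e;

		gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
			if (!pdp)
				continue;
			gen8_teardown_va_range_3lvl(vm, pdp, start, length, dead);
		}
	}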