[RFC,19/38] drm/i915: Generalize GEN6 mapping

Message ID 1412701894-28905-20-git-send-email-michel.thierry@intel.com (mailing list archive)
State New, archived

Commit Message

Michel Thierry Oct. 7, 2014, 5:11 p.m. UTC
From: Ben Widawsky <benjamin.widawsky@intel.com>

Having a more general way of doing mappings will allow us to easily map
and unmap a specific page table. Specifically, in this case we pass down
the page directory + entry, and the page table to map. This works
similarly to the x86 code.

The same work will need to happen for GEN8. At that point I will try to
combine functionality.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 62 ++++++++++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |  2 ++
 2 files changed, 35 insertions(+), 29 deletions(-)
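
For illustration, here is a rough, self-contained sketch of the calling
convention this patch moves to: the single-entry helper takes a page
directory, a PDE index and a page table, and the range helper clamps and
loops over consecutive PDEs. Every name, type and macro below (pagetab,
pagedir, PDES_PER_PD, PDE_ADDR_ENCODE, pd_mmio) is a simplified stand-in
invented for this sketch, not the driver's real definition:

	/* Illustrative only: simplified stand-ins for the driver's structures
	 * and macros; the real code lives in i915_gem_gtt.c/.h. */
	#include <stdint.h>
	#include <stdio.h>
	#include <stddef.h>

	#define PDES_PER_PD		512		/* stand-in for I915_PDES_PER_PD */
	#define PDE_VALID		(1u << 0)	/* stand-in for GEN6_PDE_VALID */
	#define PDE_ADDR_ENCODE(a)	((uint32_t)((a) & ~0xfffull))

	struct pagetab { uint64_t daddr; };	/* bus address of one page table */
	struct pagedir { struct pagetab *page_tables[PDES_PER_PD]; };

	static uint32_t pd_mmio[PDES_PER_PD];	/* stands in for the GGTT-mapped pd_addr */

	/* Map one PDE: page directory + entry index + page table, as in the patch. */
	static void map_single(struct pagedir *pd, int pde, struct pagetab *pt)
	{
		(void)pd;	/* the real helper derives the ppgtt (and pd_addr) from pd */
		pd_mmio[pde] = PDE_ADDR_ENCODE(pt->daddr) | PDE_VALID;
	}

	/* Map n consecutive PDEs starting at pde, clamped to the directory size. */
	static void map_page_range(struct pagedir *pd, unsigned int pde, size_t n)
	{
		if (pde + n > PDES_PER_PD)
			n = PDES_PER_PD - pde;
		n += pde;
		for (; pde < n; pde++)
			map_single(pd, pde, pd->page_tables[pde]);
		/* the real code reads back through the GSM here to flush the writes */
	}

	int main(void)
	{
		static struct pagetab pts[4] = {
			{ 0x1000 }, { 0x2000 }, { 0x3000 }, { 0x4000 }
		};
		static struct pagedir pd;
		int i;

		for (i = 0; i < 4; i++)
			pd.page_tables[i] = &pts[i];

		map_page_range(&pd, 0, 4);
		for (i = 0; i < 4; i++)
			printf("pde %d -> 0x%08x\n", i, pd_mmio[i]);
		return 0;
	}

The real gen6_map_single() recovers the ppgtt (and with it the GGTT-mapped
ppgtt->pd_addr) from the page directory via container_of(), and
gen6_map_page_range() flushes the PDE writes with a readback through the
GSM, as the hunks below show.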

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8a79142..2f01601 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -687,18 +687,13 @@  bail:
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
 	struct i915_address_space *vm = &ppgtt->base;
-	gen6_gtt_pte_t __iomem *pd_addr;
 	gen6_gtt_pte_t scratch_pte;
 	uint32_t pd_entry;
 	int pte, pde;
 
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
-	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
-
 	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
 		   ppgtt->pd.pd_offset,
 		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
@@ -706,7 +701,7 @@  static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 		u32 expected;
 		gen6_gtt_pte_t *pt_vaddr;
 		dma_addr_t pt_addr = ppgtt->pd.page_tables[pde]->daddr;
-		pd_entry = readl(pd_addr + pde);
+		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
 		if (pd_entry != expected)
@@ -742,39 +737,43 @@  static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	}
 }
 
-static void gen6_map_single(struct i915_hw_ppgtt *ppgtt,
-			    const unsigned pde_index,
-			    dma_addr_t daddr)
+/* Map pde (index) from the page directory @pd to the page table @pt */
+static void gen6_map_single(struct i915_pagedir *pd,
+			    const int pde, struct i915_pagetab *pt)
 {
-	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
-	uint32_t pd_entry;
-	gen6_gtt_pte_t __iomem *pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm;
-	pd_addr	+= ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(pd, struct i915_hw_ppgtt, pd);
+	u32 pd_entry;
 
-	pd_entry = GEN6_PDE_ADDR_ENCODE(daddr);
+	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
 	pd_entry |= GEN6_PDE_VALID;
 
-	writel(pd_entry, pd_addr + pde_index);
+	writel(pd_entry, ppgtt->pd_addr + pde);
+
+	/* XXX: Caller needs to make sure the write completes if necessary */
 }
 
 /* Map all the page tables found in the ppgtt structure to incrementing page
  * directories. */
-static void gen6_map_page_tables(struct i915_hw_ppgtt *ppgtt)
+static void gen6_map_page_range(struct drm_i915_private *dev_priv,
+				struct i915_pagedir *pd, unsigned pde, size_t n)
 {
-	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
-	int i;
+	if (WARN_ON(pde + n > I915_PDES_PER_PD))
+		n = I915_PDES_PER_PD - pde;
 
-	WARN_ON(ppgtt->pd.pd_offset & 0x3f);
-	for (i = 0; i < ppgtt->num_pd_entries; i++)
-		gen6_map_single(ppgtt, i, ppgtt->pd.page_tables[i]->daddr);
+	n += pde;
 
+	for (; pde < n; pde++)
+		gen6_map_single(pd, pde, pd->page_tables[pde]);
+
+	/* Make sure write is complete before other code can use this page
+	 * table. Also required for WC mapped PTEs */
 	readl(dev_priv->gtt.gsm);
 }
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
 	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
-
 	return (ppgtt->pd.pd_offset / 64) << 16;
 }
 
@@ -1120,11 +1119,15 @@  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->pd.pd_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
 
+	ppgtt->pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
+		ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
+
+	gen6_map_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->num_pd_entries);
+
 	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
 			 ppgtt->node.size >> 20,
 			 ppgtt->node.start / PAGE_SIZE);
 
-	gen6_map_page_tables(ppgtt);
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
 		  ppgtt->pd.pd_offset << 10);
 
@@ -1402,13 +1405,14 @@  void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 		/* TODO: Perhaps it shouldn't be gen6 specific */
-		if (i915_is_ggtt(vm)) {
-			if (dev_priv->mm.aliasing_ppgtt)
-				gen6_map_page_tables(dev_priv->mm.aliasing_ppgtt);
-			continue;
-		}
 
-		gen6_map_page_tables(container_of(vm, struct i915_hw_ppgtt, base));
+		struct i915_hw_ppgtt *ppgtt =
+			container_of(vm, struct i915_hw_ppgtt, base);
+
+		if (i915_is_ggtt(vm))
+			ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+		gen6_map_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->num_pd_entries);
 	}
 
 	i915_ggtt_flush(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 329f75f..f3bdd40 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -288,6 +288,8 @@  struct i915_hw_ppgtt {
 
 	struct drm_i915_file_private *file_priv;
 
+	gen6_gtt_pte_t __iomem *pd_addr;
+
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
 			 struct intel_engine_cs *ring);
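
A closing note on the pd_addr field added to i915_hw_ppgtt above: it is
the page directory's byte offset into the GSM converted to a
gen6_gtt_pte_t element offset, computed once in gen6_ppgtt_init() rather
than being recomputed on every map call. A minimal sketch of that pointer
arithmetic, using a plain array and an illustrative offset in place of
the real GSM mapping and pd.pd_offset:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t gen6_gtt_pte_t;	/* GEN6 PTEs/PDEs are 4 bytes */

	int main(void)
	{
		static gen6_gtt_pte_t gsm[1024];	/* stand-in for dev_priv->gtt.gsm */
		uint32_t pd_offset = 0x200;		/* hypothetical pd.pd_offset in bytes */

		/* Byte offset -> element offset; cached once as ppgtt->pd_addr. */
		gen6_gtt_pte_t *pd_addr = gsm + pd_offset / sizeof(gen6_gtt_pte_t);

		printf("PDE stride %zu bytes, pd_addr starts at element %td\n",
		       sizeof(gen6_gtt_pte_t), pd_addr - gsm);
		return 0;
	}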