
[08/16] drm/i915: Plumb sg_iter through va allocation ->maps

Message ID 1432650084-24491-9-git-send-email-michel.thierry@intel.com (mailing list archive)
State New, archived

Commit Message

Michel Thierry May 26, 2015, 2:21 p.m. UTC
As a step towards implementing 4 levels, and without discarding the
existing PTE map functions, we need to pass the sg_iter through. The
current function only understands addresses at page directory
granularity, but an object's pages may span a page directory boundary;
using the iterator directly as we write the PTEs allows it to stay
coherent across a VMA mapping operation that spans multiple page
tables.

v2: Rebase after s/page_tables/page_table/.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 46 +++++++++++++++++++++++--------------
 1 file changed, 29 insertions(+), 17 deletions(-)
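
For illustration only (not part of the patch): a minimal sketch of the
pattern described above, assuming a hypothetical flat page_tables[]
layout and helper names, and omitting gen8_pte_encode() for brevity.
The caller starts a single sg_page_iter and each per-page-table call
consumes only the entries in its range, so the iterator stays coherent
when the object crosses page table boundaries.

#include <linux/scatterlist.h>

/*
 * Sketch: one sg_page_iter shared across several per-page-table fills.
 * The flat page_tables[] layout and helper names are hypothetical; the
 * real code walks the pdp/pd/pt hierarchy and encodes each entry with
 * gen8_pte_encode() instead of storing the raw DMA address.
 */
static void sketch_fill_one_pt(struct sg_page_iter *sg_iter,
			       dma_addr_t *pt_vaddr, unsigned int npte)
{
	unsigned int pte = 0;

	/* Consume at most npte pages; stop early if the object ends. */
	while (npte-- && __sg_page_iter_next(sg_iter))
		pt_vaddr[pte++] = sg_page_iter_dma_address(sg_iter);
}

static void sketch_map_vma(struct sg_table *pages, dma_addr_t **page_tables,
			   unsigned int num_pts, unsigned int ptes_per_pt)
{
	struct sg_page_iter sg_iter;
	unsigned int i;

	/* One iterator for the whole VMA, shared by every page table. */
	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	for (i = 0; i < num_pts; i++)
		sketch_fill_one_pt(&sg_iter, page_tables[i], ptes_per_pt);
}

With this split the caller (as gen8_ppgtt_insert_entries() does after the
patch) owns the iterator's lifetime, and the fill helper never has to
re-derive its position when the mapping moves from one page table to the
next.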

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e71dbfc..2b6ee8e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -661,7 +661,7 @@  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 
 static void
 gen8_ppgtt_insert_pte_entries(struct i915_page_directory_pointer *pdp,
-			      struct sg_table *pages,
+			      struct sg_page_iter *sg_iter,
 			      uint64_t start,
 			      enum i915_cache_level cache_level,
 			      const bool flush)
@@ -670,11 +670,10 @@  gen8_ppgtt_insert_pte_entries(struct i915_page_directory_pointer *pdp,
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
-	struct sg_page_iter sg_iter;
 
 	pt_vaddr = NULL;
 
-	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+	while (__sg_page_iter_next(sg_iter)) {
 		if (pt_vaddr == NULL) {
 			struct i915_page_directory *pd = pdp->page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
@@ -684,7 +683,7 @@  gen8_ppgtt_insert_pte_entries(struct i915_page_directory_pointer *pdp,
 		}
 
 		pt_vaddr[pte] =
-			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
 					cache_level, true);
 		if (++pte == GEN8_PTES) {
 			if (flush)
@@ -713,8 +712,10 @@  static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
+	struct sg_page_iter sg_iter;
 
-	gen8_ppgtt_insert_pte_entries(pdp, pages, start, cache_level, !HAS_LLC(vm->dev));
+	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
+	gen8_ppgtt_insert_pte_entries(pdp, &sg_iter, start, cache_level, !HAS_LLC(vm->dev));
 }
 
 static void __gen8_do_map_pt(gen8_pde_t * const pde,
@@ -1067,10 +1068,12 @@  err_out:
 	return -ENOMEM;
 }
 
-static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
-				    struct i915_page_directory_pointer *pdp,
-				    uint64_t start,
-				    uint64_t length)
+static int __gen8_alloc_vma_range_3lvl(struct i915_address_space *vm,
+				       struct i915_page_directory_pointer *pdp,
+				       struct sg_page_iter *sg_iter,
+				       uint64_t start,
+				       uint64_t length,
+				       u32 flags)
 {
 	unsigned long *new_page_dirs, **new_page_tables;
 	struct drm_device *dev = vm->dev;
@@ -1129,7 +1132,11 @@  static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 				   gen8_pte_index(pd_start),
 				   gen8_pte_count(pd_start, pd_len));
 
-			/* Our pde is now pointing to the pagetable, pt */
+			if (sg_iter) {
+				WARN_ON(!sg_iter->__nents);
+				gen8_ppgtt_insert_pte_entries(pdp, sg_iter, pd_start,
+							      flags, !HAS_LLC(vm->dev));
+			}
 			set_bit(pde, pd->used_pdes);
 		}
 
@@ -1154,10 +1161,12 @@  err_out:
 	return ret;
 }
 
-static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
-				    struct i915_pml4 *pml4,
-				    uint64_t start,
-				    uint64_t length)
+static int __gen8_alloc_vma_range_4lvl(struct i915_address_space *vm,
+				       struct i915_pml4 *pml4,
+				       struct sg_page_iter *sg_iter,
+				       uint64_t start,
+				       uint64_t length,
+				       u32 flags)
 {
 	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
 	struct i915_hw_ppgtt *ppgtt =
@@ -1200,7 +1209,8 @@  static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
 		WARN_ON(!pdp);
 
-		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
+		ret = __gen8_alloc_vma_range_3lvl(vm, pdp, sg_iter,
+						  start, length, flags);
 		if (ret)
 			goto err_out;
 
@@ -1226,9 +1236,11 @@  static int gen8_alloc_va_range(struct i915_address_space *vm,
 		container_of(vm, struct i915_hw_ppgtt, base);
 
 	if (USES_FULL_48BIT_PPGTT(vm->dev))
-		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+		return __gen8_alloc_vma_range_4lvl(vm, &ppgtt->pml4, NULL,
+						   start, length, 0);
 	else
-		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+		return __gen8_alloc_vma_range_3lvl(vm, &ppgtt->pdp, NULL,
+						   start, length, 0);
 }
 
 /*