diff mbox

[11/20] drm/i915/gtt: Introduce fill_page_dma()

Message ID 1432219068-25391-12-git-send-email-mika.kuoppala@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Mika Kuoppala May 21, 2015, 2:37 p.m. UTC
When we set up page directories and tables, we point the entries
to the next level scratch structure. Make this generic
by introducing a fill_page_dma which maps and flushes. We also
need a 32 bit variant for legacy gens.

Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 61 +++++++++++++++++++------------------
 1 file changed, 31 insertions(+), 30 deletions(-)

Comments

Ville Syrjälä May 21, 2015, 3:16 p.m. UTC | #1
On Thu, May 21, 2015 at 05:37:39PM +0300, Mika Kuoppala wrote:
> When we set up page directories and tables, we point the entries
> to the next level scratch structure. Make this generic
> by introducing a fill_page_dma which maps and flushes. We also
> need a 32 bit variant for legacy gens.
> 
> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_gtt.c | 61 +++++++++++++++++++------------------
>  1 file changed, 31 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 5175eb8..a3ee710 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -330,6 +330,27 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
>  	memset(p, 0, sizeof(*p));
>  }
>  
> +static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
> +			  const uint64_t val)
> +{
> +	int i;
> +	uint64_t * const vaddr = kmap_atomic(p->page);
> +
> +	for (i = 0; i < 512; i++)
> +		vaddr[i] = val;
> +
> +	kunmap_atomic(vaddr);
> +}

Where did the clflushes go? Also please keep in mind only CHV needs the
clflush and VLV doesn't.

> +
> +static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
> +			     const uint32_t val32)
> +{
> +	uint64_t v = val32;
> +	v = v << 32 | val32;
> +
> +	fill_page_dma(dev, p, v);
> +}
> +
>  static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
>  {
>  	cleanup_page_dma(dev, &pt->base);
> @@ -340,19 +361,12 @@ static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
>  static void gen8_initialize_pt(struct i915_address_space *vm,
>  			       struct i915_page_table *pt)
>  {
> -	gen8_pte_t *pt_vaddr, scratch_pte;
> -	int i;
> +	gen8_pte_t scratch_pte;
>  
> -	pt_vaddr = kmap_atomic(pt->base.page);
>  	scratch_pte = gen8_pte_encode(vm->scratch.addr,
>  				      I915_CACHE_LLC, true);
>  
> -	for (i = 0; i < GEN8_PTES; i++)
> -		pt_vaddr[i] = scratch_pte;
> -
> -	if (!HAS_LLC(vm->dev))
> -		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> -	kunmap_atomic(pt_vaddr);
> +	fill_page_dma(vm->dev, &pt->base, scratch_pte);
>  }
>  
>  static struct i915_page_table *alloc_pt(struct drm_device *dev)
> @@ -585,20 +599,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
>  			       struct i915_page_directory *pd)
>  {
>  	struct i915_hw_ppgtt *ppgtt =
> -			container_of(vm, struct i915_hw_ppgtt, base);
> -	gen8_pde_t *page_directory;
> -	struct i915_page_table *pt;
> -	int i;
> +		container_of(vm, struct i915_hw_ppgtt, base);
> +	gen8_pde_t scratch_pde;
>  
> -	page_directory = kmap_atomic(pd->base.page);
> -	pt = ppgtt->scratch_pt;
> -	for (i = 0; i < I915_PDES; i++)
> -		/* Map the PDE to the page table */
> -		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
> +	scratch_pde = gen8_pde_encode(vm->dev, ppgtt->scratch_pt->base.daddr,
> +				      I915_CACHE_LLC);
>  
> -	if (!HAS_LLC(vm->dev))
> -		drm_clflush_virt_range(page_directory, PAGE_SIZE);
> -	kunmap_atomic(page_directory);
> +	fill_page_dma(vm->dev, &pd->base, scratch_pde);
>  }
>  
>  static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
> @@ -1242,22 +1249,16 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
>  }
>  
>  static void gen6_initialize_pt(struct i915_address_space *vm,
> -		struct i915_page_table *pt)
> +			       struct i915_page_table *pt)
>  {
> -	gen6_pte_t *pt_vaddr, scratch_pte;
> -	int i;
> +	gen6_pte_t scratch_pte;
>  
>  	WARN_ON(vm->scratch.addr == 0);
>  
>  	scratch_pte = vm->pte_encode(vm->scratch.addr,
>  			I915_CACHE_LLC, true, 0);
>  
> -	pt_vaddr = kmap_atomic(pt->base.page);
> -
> -	for (i = 0; i < GEN6_PTES; i++)
> -		pt_vaddr[i] = scratch_pte;
> -
> -	kunmap_atomic(pt_vaddr);
> +	fill_page_dma_32(vm->dev, &pt->base, scratch_pte);
>  }
>  
>  static int gen6_alloc_va_range(struct i915_address_space *vm,
> -- 
> 1.9.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5175eb8..a3ee710 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -330,6 +330,27 @@  static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 	memset(p, 0, sizeof(*p));
 }
 
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+			  const uint64_t val)
+{
+	int i;
+	uint64_t * const vaddr = kmap_atomic(p->page);
+
+	for (i = 0; i < 512; i++)
+		vaddr[i] = val;
+
+	kunmap_atomic(vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+			     const uint32_t val32)
+{
+	uint64_t v = val32;
+	v = v << 32 | val32;
+
+	fill_page_dma(dev, p, v);
+}
+
 static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 {
 	cleanup_page_dma(dev, &pt->base);
@@ -340,19 +361,12 @@  static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 static void gen8_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	gen8_pte_t *pt_vaddr, scratch_pte;
-	int i;
+	gen8_pte_t scratch_pte;
 
-	pt_vaddr = kmap_atomic(pt->base.page);
 	scratch_pte = gen8_pte_encode(vm->scratch.addr,
 				      I915_CACHE_LLC, true);
 
-	for (i = 0; i < GEN8_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-	kunmap_atomic(pt_vaddr);
+	fill_page_dma(vm->dev, &pt->base, scratch_pte);
 }
 
 static struct i915_page_table *alloc_pt(struct drm_device *dev)
@@ -585,20 +599,13 @@  static void gen8_initialize_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd)
 {
 	struct i915_hw_ppgtt *ppgtt =
-			container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_pde_t *page_directory;
-	struct i915_page_table *pt;
-	int i;
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_pde_t scratch_pde;
 
-	page_directory = kmap_atomic(pd->base.page);
-	pt = ppgtt->scratch_pt;
-	for (i = 0; i < I915_PDES; i++)
-		/* Map the PDE to the page table */
-		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
+	scratch_pde = gen8_pde_encode(vm->dev, ppgtt->scratch_pt->base.daddr,
+				      I915_CACHE_LLC);
 
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(page_directory, PAGE_SIZE);
-	kunmap_atomic(page_directory);
+	fill_page_dma(vm->dev, &pd->base, scratch_pde);
 }
 
 static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
@@ -1242,22 +1249,16 @@  static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 }
 
 static void gen6_initialize_pt(struct i915_address_space *vm,
-		struct i915_page_table *pt)
+			       struct i915_page_table *pt)
 {
-	gen6_pte_t *pt_vaddr, scratch_pte;
-	int i;
+	gen6_pte_t scratch_pte;
 
 	WARN_ON(vm->scratch.addr == 0);
 
 	scratch_pte = vm->pte_encode(vm->scratch.addr,
 			I915_CACHE_LLC, true, 0);
 
-	pt_vaddr = kmap_atomic(pt->base.page);
-
-	for (i = 0; i < GEN6_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	kunmap_atomic(pt_vaddr);
+	fill_page_dma_32(vm->dev, &pt->base, scratch_pte);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,