
[6/6] drm/i915: distinguish pad and fault pages

Message ID 1372458217-2053-7-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived

Commit Message

Ben Widawsky June 28, 2013, 10:23 p.m. UTC
For finer-grained debugging, make it possible for both magic numbers to be
present in the PTEs when both the faulting and pad-page options are enabled.
Previously the padding magic number took precedence.

This change might not seem worthwhile to some, but it helped me verify my
code was correct, and I like it. It can be dropped without much impact to
the rest of the series, though.
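
For illustration only (not part of the patch): a minimal sketch of the PTE
selection the clear_range paths end up doing, using the GEN6_PTE_FAULT /
GEN6_PAD_PTE_FAULT magic values and the I915_SCRATCH_FAULTS debug flag from
earlier in this series. The fault magic still applies to every scratch
entry, while the pad magic is now chosen per call, only for the single pad
entry a caller clears with pad=true:

	/* Sketch mirroring gen6_*_clear_range() below. */
	scratch_pte = pte_encode(dev, scratch_page_dma_addr, I915_CACHE_LLC);
	if (unlikely(dev_priv->debug_flags & I915_SCRATCH_FAULTS))
		scratch_pte = GEN6_PTE_FAULT;     /* mark all scratch PTEs as faults */
	if (unlikely(pad))                        /* overrides fault for this entry only */
		scratch_pte = GEN6_PAD_PTE_FAULT;

A caller that wants the distinction binds the object as usual and then
clears just the trailing pad page with pad=true, e.g.
ppgtt->clear_range(ppgtt, first, 1, true) as done in
i915_ppgtt_bind_object().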

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h     |  6 ++++--
 drivers/gpu/drm/i915/i915_gem_gtt.c | 42 ++++++++++++++++++++++++-------------
 2 files changed, 31 insertions(+), 17 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3b2046b..aaf9554 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -475,7 +475,8 @@  struct i915_gtt {
 	void (*gtt_remove)(struct drm_device *dev);
 	void (*gtt_clear_range)(struct drm_device *dev,
 				unsigned int first_entry,
-				unsigned int num_entries);
+				unsigned int num_entries,
+				bool pad);
 	void (*gtt_insert_entries)(struct drm_device *dev,
 				   struct sg_table *st,
 				   unsigned int pg_start,
@@ -499,7 +500,8 @@  struct i915_hw_ppgtt {
 	/* pte functions, mirroring the interface of the global gtt. */
 	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
 			    unsigned int first_entry,
-			    unsigned int num_entries);
+			    unsigned int num_entries,
+			    bool pad);
 	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
 			       struct sg_table *st,
 			       unsigned int pg_start,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 187738f..5192f45 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -186,7 +186,8 @@  static int gen6_ppgtt_enable(struct drm_device *dev)
 /* PPGTT support for Sandybdrige/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 				   unsigned first_entry,
-				   unsigned num_entries)
+				   unsigned num_entries,
+				   bool pad)
 {
 	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
 	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
@@ -194,6 +195,7 @@  static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;
 
+	BUG_ON(pad && num_entries != 1);
 	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
 					ppgtt->scratch_page_dma_addr,
 					I915_CACHE_LLC);
@@ -201,7 +203,7 @@  static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 	if (unlikely(dev_priv->debug_flags & I915_SCRATCH_FAULTS))
 		scratch_pte = GEN6_PTE_FAULT;
 
-	if (unlikely(dev_priv->debug_flags & I915_PAD_PAGE))
+	if (unlikely(pad))
 		scratch_pte = GEN6_PAD_PTE_FAULT;
 
 	while (num_entries) {
@@ -324,7 +326,8 @@  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	}
 
 	ppgtt->clear_range(ppgtt, 0,
-			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+			   ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES,
+			   false);
 
 	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -394,7 +397,7 @@  void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 	ppgtt->insert_entries(ppgtt, obj->pages, first, cache_level);
 	if (unlikely(obj->has_pad_page)) {
 		first += (obj->gtt_space->size >> PAGE_SHIFT) - 1;
-		ppgtt->clear_range(ppgtt, first, 1);
+		ppgtt->clear_range(ppgtt, first, 1, true);
 	}
 }
 
@@ -403,7 +406,8 @@  void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 {
 	ppgtt->clear_range(ppgtt,
 			   obj->gtt_space->start >> PAGE_SHIFT,
-			   obj->base.size >> PAGE_SHIFT);
+			   obj->base.size >> PAGE_SHIFT,
+			   false);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -451,7 +455,8 @@  void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 	/* First fill our portion of the GTT with scratch pages */
 	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
-				      dev_priv->gtt.total / PAGE_SIZE);
+				      dev_priv->gtt.total / PAGE_SIZE,
+				      false);
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		i915_gem_clflush_object(obj);
@@ -519,7 +524,8 @@  static void gen6_ggtt_insert_entries(struct drm_device *dev,
 
 static void gen6_ggtt_clear_range(struct drm_device *dev,
 				  unsigned int first_entry,
-				  unsigned int num_entries)
+				  unsigned int num_entries,
+				  bool pad)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -527,6 +533,7 @@  static void gen6_ggtt_clear_range(struct drm_device *dev,
 	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
 	int i;
 
+	BUG_ON(pad && num_entries != 1);
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
 		 first_entry, num_entries, max_entries))
@@ -537,7 +544,7 @@  static void gen6_ggtt_clear_range(struct drm_device *dev,
 					       I915_CACHE_LLC);
 	if (unlikely(dev_priv->debug_flags & I915_SCRATCH_FAULTS))
 		scratch_pte = GEN6_PTE_FAULT;
-	if (unlikely(dev_priv->debug_flags & I915_PAD_PAGE))
+	if (unlikely(pad))
 		scratch_pte = GEN6_PAD_PTE_FAULT;
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -559,7 +566,8 @@  static void i915_ggtt_insert_entries(struct drm_device *dev,
 
 static void i915_ggtt_clear_range(struct drm_device *dev,
 				  unsigned int first_entry,
-				  unsigned int num_entries)
+				  unsigned int num_entries,
+				  bool pad)
 {
 	intel_gtt_clear_range(first_entry, num_entries);
 }
@@ -577,7 +585,7 @@  void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 					 cache_level);
 	if (unlikely(obj->has_pad_page)) {
 		first += (obj->gtt_space->size >> PAGE_SHIFT) - 1;
-		dev_priv->gtt.gtt_clear_range(dev, first, 1);
+		dev_priv->gtt.gtt_clear_range(dev, first, 1, true);
 	}
 
 	obj->has_global_gtt_mapping = 1;
@@ -590,7 +598,8 @@  void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 
 	dev_priv->gtt.gtt_clear_range(obj->base.dev,
 				      obj->gtt_space->start >> PAGE_SHIFT,
-				      obj->base.size >> PAGE_SHIFT);
+				      obj->base.size >> PAGE_SHIFT,
+				      false);
 
 	obj->has_global_gtt_mapping = 0;
 }
@@ -677,11 +686,12 @@  void i915_gem_setup_global_gtt(struct drm_device *dev,
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
 		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
-					      (hole_end-hole_start) / PAGE_SIZE);
+					      (hole_end-hole_start) / PAGE_SIZE,
+					      false);
 	}
 
 	/* And finally clear the reserved guard page */
-	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1, false);
 }
 
 static bool
@@ -917,11 +927,13 @@  void i915_gem_rewrite_scratch_ptes(struct drm_device *dev)
 	drm_mm_for_each_hole(n, &dev_priv->mm.gtt_space, start, end) {
 		dev_priv->gtt.gtt_clear_range(dev,
 					      start >> PAGE_SHIFT,
-					      (end - start) >> PAGE_SHIFT);
+					      (end - start) >> PAGE_SHIFT,
+					      false);
 		if (ppgtt)
 			ppgtt->clear_range(ppgtt,
 					   start >> PAGE_SHIFT,
-					   (end - start) >> PAGE_SHIFT);
+					   (end - start) >> PAGE_SHIFT,
+					   false);
 	}
 }
 #endif