diff mbox

[5/6] drm/i915: Let userspace create a faultable pad page

Message ID 1372458217-2053-6-git-send-email-ben@bwidawsk.net (mailing list archive)
State New, archived
Headers show

Commit Message

Ben Widawsky June 28, 2013, 10:23 p.m. UTC
Whenever userspace allocates a BO, add one more page. On maps of this
BO, make sure the last page will fault if accessed. We don't need to do
this for the kernel, since our bugs should be fairly easy to catch
already.

The code could be optimized to allocate one fewer page for this, but
doing so would require a lot of rework. Instead, do everything as we
normally would, and clear out the last page after it's all done.

NOTE: This does not convert previously allocated objects.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h     |  2 ++
 drivers/gpu/drm/i915/i915_gem.c     |  7 +++++++
 drivers/gpu/drm/i915/i915_gem_gtt.c | 22 +++++++++++++++++++---
 3 files changed, 28 insertions(+), 3 deletions(-)
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9e38a6..3b2046b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1180,6 +1180,7 @@  typedef struct drm_i915_private {
 #define I915_DEBUG_NONE 0
 #define I915_SCRATCH_FAULTS (1<<0)
 #define I915_SYNC_EXECBUF (1<<1)
+#define I915_PAD_PAGE (1<<2)
 	u64 debug_flags;
 } drm_i915_private_t;
 
@@ -1307,6 +1308,7 @@  struct drm_i915_gem_object {
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
 	unsigned int has_dma_mapping:1;
+	unsigned int has_pad_page:1;
 
 	struct sg_table *pages;
 	int pages_pin_count;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c68b90f..f84aada 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -205,6 +205,7 @@  i915_gem_create(struct drm_file *file,
 		uint64_t size,
 		uint32_t *handle_p)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	int ret;
 	u32 handle;
@@ -213,11 +214,17 @@  i915_gem_create(struct drm_file *file,
 	if (size == 0)
 		return -EINVAL;
 
+	if (dev_priv->debug_flags & I915_PAD_PAGE)
+		size += PAGE_SIZE;
+
 	/* Allocate the new object */
 	obj = i915_gem_alloc_object(dev, size);
 	if (obj == NULL)
 		return -ENOMEM;
 
+	if (dev_priv->debug_flags & I915_PAD_PAGE)
+		obj->has_pad_page = 1;
+
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	if (ret) {
 		drm_gem_object_release(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f71636e..187738f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -43,6 +43,7 @@ 
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
 /* Use a pattern to make debug a bit easier */
 #define GEN6_PTE_FAULT			0xbaddc0de
+#define GEN6_PAD_PTE_FAULT		0x0c0ffee
 
 static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
 				      dma_addr_t addr,
@@ -200,6 +201,9 @@  static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 	if (unlikely(dev_priv->debug_flags & I915_SCRATCH_FAULTS))
 		scratch_pte = GEN6_PTE_FAULT;
 
+	if (unlikely(dev_priv->debug_flags & I915_PAD_PAGE))
+		scratch_pte = GEN6_PAD_PTE_FAULT;
+
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
 		if (last_pte > I915_PPGTT_PT_ENTRIES)
@@ -385,9 +389,13 @@  void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level)
 {
-	ppgtt->insert_entries(ppgtt, obj->pages,
-			      obj->gtt_space->start >> PAGE_SHIFT,
-			      cache_level);
+	unsigned int first = obj->gtt_space->start >> PAGE_SHIFT;
+
+	ppgtt->insert_entries(ppgtt, obj->pages, first, cache_level);
+	if (unlikely(obj->has_pad_page)) {
+		first += (obj->gtt_space->size >> PAGE_SHIFT) - 1;
+		ppgtt->clear_range(ppgtt, first, 1);
+	}
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -529,6 +537,8 @@  static void gen6_ggtt_clear_range(struct drm_device *dev,
 					       I915_CACHE_LLC);
 	if (unlikely(dev_priv->debug_flags & I915_SCRATCH_FAULTS))
 		scratch_pte = GEN6_PTE_FAULT;
+	if (unlikely(dev_priv->debug_flags & I915_PAD_PAGE))
+		scratch_pte = GEN6_PAD_PTE_FAULT;
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
@@ -560,10 +570,15 @@  void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int first = obj->gtt_space->start >> PAGE_SHIFT;
 
 	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
 					 obj->gtt_space->start >> PAGE_SHIFT,
 					 cache_level);
+	if (unlikely(obj->has_pad_page)) {
+		first += (obj->gtt_space->size >> PAGE_SHIFT) - 1;
+		dev_priv->gtt.gtt_clear_range(dev, first, 1);
+	}
 
 	obj->has_global_gtt_mapping = 1;
 }
@@ -632,6 +647,7 @@  void i915_gem_setup_global_gtt(struct drm_device *dev,
 	unsigned long hole_start, hole_end;
 
 	BUILD_BUG_ON(GEN6_PTE_FAULT & GEN6_PTE_VALID);
+	BUILD_BUG_ON(GEN6_PAD_PTE_FAULT & GEN6_PTE_VALID);
 	BUG_ON(mappable_end > end);
 
 	/* Subtract the guard page ... */