drm/i915: Provide i915_request debug Kconfig options
diff mbox series

Message ID 20200710165615.15673-1-chris@chris-wilson.co.uk
State New
Headers show
Series
  • drm/i915: Provide i915_request debug Kconfig options
Related show

Commit Message

Chris Wilson July 10, 2020, 4:56 p.m. UTC
i915_request uses the particularly nasty SLAB_TYPESAFE_BY_RCU which
makes it very hard to debug use-after-free, as freed objects are
immediately reused from the slab freelists, which defeats the normal
poisoning used to detect uninitialised use or use-after-free. For
debugging purposes, provide an option to disable the typesafe-by-RCU
behaviour of the request slab cache and defer each free through an RCU
grace period instead.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Venkata Ramana Nayana <venkata.ramana.nayana@intel.com>
---
 drivers/gpu/drm/i915/Kconfig.debug  | 12 +++++++++++
 drivers/gpu/drm/i915/i915_request.c | 31 ++++++++++++++++++++++++++---
 2 files changed, 40 insertions(+), 3 deletions(-)

Patch
diff mbox series

diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 206882e154bc..7daa845928db 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -155,6 +155,18 @@  config DRM_I915_DEBUG_GUC
 
 	  If in doubt, say "N".
 
+config DRM_I915_DEBUG_REQUESTS
+	bool "Enable additional driver debugging for requests"
+	default n
+	depends on DRM_I915_DEBUG_GEM
+	help
+	  Enable extra sanity checks (including BUGs) along the request
+	  paths that may slow the system down and, if hit, hang the machine.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
+
 config DRM_I915_SELFTEST
 	bool "Enable selftests upon driver load"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 3bb7320249ae..72def88561ce 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -106,6 +106,29 @@  struct kmem_cache *i915_request_slab_cache(void)
 	return global.slab_requests;
 }
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_REQUESTS)
+
+static void rcu_i915_fence_free(struct rcu_head *rcu)
+{
+	struct i915_request *rq = container_of(rcu, typeof(*rq), fence.rcu);
+
+	kmem_cache_free(global.slab_requests, rq);
+}
+
+static void i915_fence_free(struct i915_request *rq)
+{
+	call_rcu(&rq->fence.rcu, rcu_i915_fence_free);
+}
+
+#else
+
+static void i915_fence_free(struct i915_request *rq)
+{
+	kmem_cache_free(global.slab_requests, rq);
+}
+
+#endif
+
 static void i915_fence_release(struct dma_fence *fence)
 {
 	struct i915_request *rq = to_request(fence);
@@ -155,7 +178,7 @@  static void i915_fence_release(struct dma_fence *fence)
 	    !cmpxchg(&rq->engine->request_pool, NULL, rq))
 		return;
 
-	kmem_cache_free(global.slab_requests, rq);
+	i915_fence_free(rq);
 }
 
 const struct dma_fence_ops i915_fence_ops = {
@@ -1850,9 +1873,11 @@  int __init i915_global_request_init(void)
 		kmem_cache_create("i915_request",
 				  sizeof(struct i915_request),
 				  __alignof__(struct i915_request),
-				  SLAB_HWCACHE_ALIGN |
+#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_REQUESTS)
+				  SLAB_TYPESAFE_BY_RCU |
+#endif
 				  SLAB_RECLAIM_ACCOUNT |
-				  SLAB_TYPESAFE_BY_RCU,
+				  SLAB_HWCACHE_ALIGN,
 				  __i915_request_ctor);
 	if (!global.slab_requests)
 		return -ENOMEM;