diff mbox series

[RFC,1/4] drm/i915: split out uncore_mmio_debug

Message ID 20190624203152.13725-2-daniele.ceraolospurio@intel.com (mailing list archive)
State New, archived
Headers show
Series Display uncore | expand

Commit Message

Daniele Ceraolo Spurio June 24, 2019, 8:31 p.m. UTC
Multiple uncore structures will share the debug infrastructure, so
move it to a common place and add extra locking around it.

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c     |  1 +
 drivers/gpu/drm/i915/i915_drv.h     |  1 +
 drivers/gpu/drm/i915/intel_uncore.c | 54 +++++++++++++++++++++--------
 drivers/gpu/drm/i915/intel_uncore.h |  9 ++++-
 4 files changed, 50 insertions(+), 15 deletions(-)

Comments

Chris Wilson June 26, 2019, 10:02 a.m. UTC | #1
Quoting Daniele Ceraolo Spurio (2019-06-24 21:31:49)
> @@ -605,18 +614,20 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
>  void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
>  {
>         spin_lock_irq(&uncore->lock);
> +       spin_lock(&uncore->debug->lock);
>         if (!uncore->user_forcewake.count++) {

Afaict, uncore->user_forcewake.count is only guarded by uncore->lock
and we only need to take debug->lock for the debug->unclaimed_mmio_check
manipulation. But there needs to be a shared usage counter around the
debug as it is shared state.

>                 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
>  
>                 /* Save and disable mmio debugging for the user bypass */
>                 uncore->user_forcewake.saved_mmio_check =
> -                       uncore->unclaimed_mmio_check;
> +                       uncore->debug->unclaimed_mmio_check;
>                 uncore->user_forcewake.saved_mmio_debug =
>                         i915_modparams.mmio_debug;

Something more like

spin_lock_irq(&uncore->lock);
if (!uncore->user_forcewake_count++) {
	spin_lock(&uncore->debug->lock);
	if (!uncore->debug->usage_count++) {
		...
	}
	spin_unlock(&uncore->debug->lock);
}
...
spin_unlock_irq(&uncore->lock);
?
-Chris
Daniele Ceraolo Spurio June 26, 2019, 5:38 p.m. UTC | #2
On 6/26/19 3:02 AM, Chris Wilson wrote:
> Quoting Daniele Ceraolo Spurio (2019-06-24 21:31:49)
>> @@ -605,18 +614,20 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
>>   void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
>>   {
>>          spin_lock_irq(&uncore->lock);
>> +       spin_lock(&uncore->debug->lock);
>>          if (!uncore->user_forcewake.count++) {
> 
> Afaict, uncore->user_forcewake.count is only guarded by uncore->lock
> and we only need to take debug->lock for the debug->unclaimed_mmio_check
> manipulation. But there needs to be a shared usage counter around the
> debug as it is shared state.
> 
>>                  intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
>>   
>>                  /* Save and disable mmio debugging for the user bypass */
>>                  uncore->user_forcewake.saved_mmio_check =
>> -                       uncore->unclaimed_mmio_check;
>> +                       uncore->debug->unclaimed_mmio_check;
>>                  uncore->user_forcewake.saved_mmio_debug =
>>                          i915_modparams.mmio_debug;
> 
> Something more like
> 
> spin_lock_irq(&uncore->lock);
> if (!uncore->user_forcewake_count++) {
> 	spin_lock(&uncore->debug->lock);
> 	if (!uncore->debug->usage_count++) {
> 		...
> 	}
> 	spin_unlock(&uncore->debug->lock);
> }
> ...
> spin_unlock_irq(&uncore->lock);
> ?
> -Chris
> 

I do something similar in the next patch in the series (with the lock 
still on the outside of the if). I've split that out because I've added 
some functional changes to the flow. I can squash the 2 patches if you 
think it is better that way.

Daniele
Chris Wilson June 26, 2019, 5:58 p.m. UTC | #3
Quoting Daniele Ceraolo Spurio (2019-06-26 18:38:45)
> 
> 
> On 6/26/19 3:02 AM, Chris Wilson wrote:
> > Quoting Daniele Ceraolo Spurio (2019-06-24 21:31:49)
> >> @@ -605,18 +614,20 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
> >>   void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
> >>   {
> >>          spin_lock_irq(&uncore->lock);
> >> +       spin_lock(&uncore->debug->lock);
> >>          if (!uncore->user_forcewake.count++) {
> > 
> > Afaict, uncore->user_forcewake.count is only guarded by uncore->lock
> > and we only need to take debug->lock for the debug->unclaimed_mmio_check
> > manipulation. But there needs to be a shared usage counter around the
> > debug as it is shared state.
> > 
> >>                  intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
> >>   
> >>                  /* Save and disable mmio debugging for the user bypass */
> >>                  uncore->user_forcewake.saved_mmio_check =
> >> -                       uncore->unclaimed_mmio_check;
> >> +                       uncore->debug->unclaimed_mmio_check;
> >>                  uncore->user_forcewake.saved_mmio_debug =
> >>                          i915_modparams.mmio_debug;
> > 
> > Something more like
> > 
> > spin_lock_irq(&uncore->lock);
> > if (!uncore->user_forcewake_count++) {
> >       spin_lock(&uncore->debug->lock);
> >       if (!uncore->debug->usage_count++) {
> >               ...
> >       }
> >       spin_unlock(&uncore->debug->lock);
> > }
> > ...
> > spin_unlock_irq(&uncore->lock);
> > ?
> > -Chris
> > 
> 
> I do something similar in the next patch in the series (with the lock 
> still on the outside of the if). I've split that out because I've added 
> some functional changes to the flow. I can squash the 2 patches if you 
> think it is better that way.

Yes. Looking at the second patch, that is clearer wrt what data we are
guarding with the locks.

Don't drop the intel_ prefix from intel_uncore_debug as it looks to
still be visible outside of its enclave (it's getting long, but it should
be more or less internal to the various intel_uncore implementations) and
squash these 2 as I feel more comfortable with intel_uncore_debug taking
control of the locking as required for sharing and reviewing the locking
wrt to the shared data.
-Chris
Daniele Ceraolo Spurio June 26, 2019, 6:20 p.m. UTC | #4
On 6/26/19 10:58 AM, Chris Wilson wrote:
> Quoting Daniele Ceraolo Spurio (2019-06-26 18:38:45)
>>
>>
>> On 6/26/19 3:02 AM, Chris Wilson wrote:
>>> Quoting Daniele Ceraolo Spurio (2019-06-24 21:31:49)
>>>> @@ -605,18 +614,20 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
>>>>    void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
>>>>    {
>>>>           spin_lock_irq(&uncore->lock);
>>>> +       spin_lock(&uncore->debug->lock);
>>>>           if (!uncore->user_forcewake.count++) {
>>>
>>> Afaict, uncore->user_forcewake.count is only guarded by uncore->lock
>>> and we only need to take debug->lock for the debug->unclaimed_mmio_check
>>> manipulation. But there needs to be a shared usage counter around the
>>> debug as it is shared state.
>>>
>>>>                   intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
>>>>    
>>>>                   /* Save and disable mmio debugging for the user bypass */
>>>>                   uncore->user_forcewake.saved_mmio_check =
>>>> -                       uncore->unclaimed_mmio_check;
>>>> +                       uncore->debug->unclaimed_mmio_check;
>>>>                   uncore->user_forcewake.saved_mmio_debug =
>>>>                           i915_modparams.mmio_debug;
>>>
>>> Something more like
>>>
>>> spin_lock_irq(&uncore->lock);
>>> if (!uncore->user_forcewake_count++) {
>>>        spin_lock(&uncore->debug->lock);
>>>        if (!uncore->debug->usage_count++) {
>>>                ...
>>>        }
>>>        spin_unlock(&uncore->debug->lock);
>>> }
>>> ...
>>> spin_unlock_irq(&uncore->lock);
>>> ?
>>> -Chris
>>>
>>
>> I do something similar in the next patch in the series (with the lock
>> still on the outside of the if). I've split that out because I've added
>> some functional changes to the flow. I can squash the 2 patches if you
>> think it is better that way.
> 
> Yes. Looking at the second patch, that is clearer wrt what data we are
> guarding with the locks.
> 
> Don't drop the intel_ prefix from intel_uncore_debug as it looks to
> still be visible outside of its enclave (it's getting long, but it should

I'm assuming you're referring to the 2 new functions. These are actually 
meant to only be used within the file and not be externally visible, I just 
forgot the static tag (and sparse complained for that). Everything else 
should still have the intel_ prefix.

Daniele

> be more or less internal to the various intel_uncore implementations) and
> squash these 2 as I feel more comfortable with intel_uncore_debug taking
> control of the locking as required for sharing and reviewing the locking
> wrt to the shared data.
> -Chris
>
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e1817f89f5d5..1c4cfdb04867 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -901,6 +901,7 @@  static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 
 	intel_device_info_subplatform_init(dev_priv);
 
+	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
 	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
 
 	spin_lock_init(&dev_priv->irq_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 62e7c5e8aee5..a53b65d78159 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1341,6 +1341,7 @@  struct drm_i915_private {
 	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */
 
 	struct intel_uncore uncore;
+	struct intel_uncore_mmio_debug mmio_debug;
 
 	struct i915_virtual_gpu vgpu;
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 68d54e126d79..f2dfaccd6614 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -34,6 +34,13 @@ 
 
 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
 
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+{
+	spin_lock_init(&mmio_debug->lock);
+	mmio_debug->unclaimed_mmio_check = 1;
+}
+
 static const char * const forcewake_domain_names[] = {
 	"render",
 	"blitter",
@@ -473,6 +480,8 @@  check_for_unclaimed_mmio(struct intel_uncore *uncore)
 {
 	bool ret = false;
 
+	lockdep_assert_held(&uncore->debug->lock);
+
 	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
 		ret |= fpga_check_for_unclaimed_mmio(uncore);
 
@@ -605,18 +614,20 @@  void intel_uncore_forcewake_get(struct intel_uncore *uncore,
 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
 {
 	spin_lock_irq(&uncore->lock);
+	spin_lock(&uncore->debug->lock);
 	if (!uncore->user_forcewake.count++) {
 		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
 
 		/* Save and disable mmio debugging for the user bypass */
 		uncore->user_forcewake.saved_mmio_check =
-			uncore->unclaimed_mmio_check;
+			uncore->debug->unclaimed_mmio_check;
 		uncore->user_forcewake.saved_mmio_debug =
 			i915_modparams.mmio_debug;
 
-		uncore->unclaimed_mmio_check = 0;
+		uncore->debug->unclaimed_mmio_check = 0;
 		i915_modparams.mmio_debug = 0;
 	}
+	spin_unlock(&uncore->debug->lock);
 	spin_unlock_irq(&uncore->lock);
 }
 
@@ -630,18 +641,20 @@  void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 {
 	spin_lock_irq(&uncore->lock);
+	spin_lock(&uncore->debug->lock);
 	if (!--uncore->user_forcewake.count) {
-		if (intel_uncore_unclaimed_mmio(uncore))
+		if (check_for_unclaimed_mmio(uncore))
 			dev_info(uncore->i915->drm.dev,
 				 "Invalid mmio detected during user access\n");
 
-		uncore->unclaimed_mmio_check =
+		uncore->debug->unclaimed_mmio_check =
 			uncore->user_forcewake.saved_mmio_check;
 		i915_modparams.mmio_debug =
 			uncore->user_forcewake.saved_mmio_debug;
 
 		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
 	}
+	spin_unlock(&uncore->debug->lock);
 	spin_unlock_irq(&uncore->lock);
 }
 
@@ -1059,7 +1072,16 @@  unclaimed_reg_debug(struct intel_uncore *uncore,
 	if (likely(!i915_modparams.mmio_debug))
 		return;
 
+	/* interrupts are disabled and re-enabled around uncore->lock usage */
+	lockdep_assert_held(&uncore->lock);
+
+	if (before)
+		spin_lock(&uncore->debug->lock);
+
 	__unclaimed_reg_debug(uncore, reg, read, before);
+
+	if (!before)
+		spin_unlock(&uncore->debug->lock);
 }
 
 #define GEN2_READ_HEADER(x) \
@@ -1579,6 +1601,7 @@  void intel_uncore_init_early(struct intel_uncore *uncore,
 	spin_lock_init(&uncore->lock);
 	uncore->i915 = i915;
 	uncore->rpm = &i915->runtime_pm;
+	uncore->debug = &i915->mmio_debug;
 }
 
 static void uncore_raw_init(struct intel_uncore *uncore)
@@ -1604,7 +1627,6 @@  static int uncore_forcewake_init(struct intel_uncore *uncore)
 	ret = intel_uncore_fw_domains_init(uncore);
 	if (ret)
 		return ret;
-
 	forcewake_early_sanitize(uncore, 0);
 
 	if (IS_GEN_RANGE(i915, 6, 7)) {
@@ -1653,8 +1675,6 @@  int intel_uncore_init_mmio(struct intel_uncore *uncore)
 	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
 
-	uncore->unclaimed_mmio_check = 1;
-
 	if (!intel_uncore_has_forcewake(uncore)) {
 		uncore_raw_init(uncore);
 	} else {
@@ -1679,7 +1699,7 @@  int intel_uncore_init_mmio(struct intel_uncore *uncore)
 		uncore->flags |= UNCORE_HAS_FIFO;
 
 	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(uncore))
+	if (intel_uncore_unclaimed_mmio(uncore))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	return 0;
@@ -1924,7 +1944,13 @@  int __intel_wait_for_register(struct intel_uncore *uncore,
 
 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
 {
-	return check_for_unclaimed_mmio(uncore);
+	bool ret;
+
+	spin_lock_irq(&uncore->debug->lock);
+	ret = check_for_unclaimed_mmio(uncore);
+	spin_unlock_irq(&uncore->debug->lock);
+
+	return ret;
 }
 
 bool
@@ -1932,24 +1958,24 @@  intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
 {
 	bool ret = false;
 
-	spin_lock_irq(&uncore->lock);
+	spin_lock_irq(&uncore->debug->lock);
 
-	if (unlikely(uncore->unclaimed_mmio_check <= 0))
+	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
 		goto out;
 
-	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
+	if (unlikely(check_for_unclaimed_mmio(uncore))) {
 		if (!i915_modparams.mmio_debug) {
 			DRM_DEBUG("Unclaimed register detected, "
 				  "enabling oneshot unclaimed register reporting. "
 				  "Please use i915.mmio_debug=N for more information.\n");
 			i915_modparams.mmio_debug++;
 		}
-		uncore->unclaimed_mmio_check--;
+		uncore->debug->unclaimed_mmio_check--;
 		ret = true;
 	}
 
 out:
-	spin_unlock_irq(&uncore->lock);
+	spin_unlock_irq(&uncore->debug->lock);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 7108475d9b24..d1b12b3b8353 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -36,6 +36,11 @@  struct drm_i915_private;
 struct intel_runtime_pm;
 struct intel_uncore;
 
+struct intel_uncore_mmio_debug {
+	spinlock_t lock; /** lock is also taken in irq contexts. */
+	int unclaimed_mmio_check;
+};
+
 enum forcewake_domain_id {
 	FW_DOMAIN_ID_RENDER = 0,
 	FW_DOMAIN_ID_BLITTER,
@@ -143,7 +148,7 @@  struct intel_uncore {
 		int saved_mmio_debug;
 	} user_forcewake;
 
-	int unclaimed_mmio_check;
+	struct intel_uncore_mmio_debug *debug;
 };
 
 /* Iterate over initialised fw domains */
@@ -178,6 +183,8 @@  intel_uncore_has_fifo(const struct intel_uncore *uncore)
 	return uncore->flags & UNCORE_HAS_FIFO;
 }
 
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
 void intel_uncore_init_early(struct intel_uncore *uncore,
 			     struct drm_i915_private *i915);
 int intel_uncore_init_mmio(struct intel_uncore *uncore);