
[05/18] drm/i915/display/display: Prefer drm_WARN_ON over WARN_ON

Message ID 20200406112800.23762-6-pankaj.laxminarayan.bharadiya@intel.com (mailing list archive)
State New, archived
Series Prefer drm_WARN* over WARN*

Commit Message

Pankaj Bharadiya April 6, 2020, 11:27 a.m. UTC
struct drm_device specific drm_WARN* macros include device information
in the backtrace, so we know what device the warnings originate from.

Prefer drm_WARN_ON over WARN_ON in places where a struct i915_power_domains
pointer is available.

The conversion was done with the following semantic patch:

@@
identifier func, T;
@@
func(struct i915_power_domains *T,...) {
+ struct drm_i915_private *i915 = container_of(T, struct drm_i915_private, power_domains);
<+...
-WARN_ON(
+drm_WARN_ON(&i915->drm,
...)
...+>

}
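
For illustration, this is a minimal sketch of the before/after shape the
semantic patch produces; the function name is hypothetical, and the real
conversions are in the diff below:

static void example_func(struct i915_power_domains *power_domains)
{
	/* Added by the rule: recover the owning device from the embedded member. */
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	/* WARN_ON(power_domains->async_put_wakeref); becomes: */
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}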

Signed-off-by: Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com>
---
 .../drm/i915/display/intel_display_power.c    | 35 +++++++++++++------
 1 file changed, 24 insertions(+), 11 deletions(-)

Comments

Jani Nikula April 21, 2020, 7:53 a.m. UTC | #1
Pankaj, the subject line is identical to patch 4, please update.

Imre, one question inline for you.

On Mon, 06 Apr 2020, Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com> wrote:
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> index 433e5a81dd4d..5475f989df4c 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> @@ -1850,22 +1850,29 @@ static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
>  static bool
>  assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
>  {
> -	return !WARN_ON(power_domains->async_put_domains[0] &
> -			power_domains->async_put_domains[1]);
> +	struct drm_i915_private *i915 = container_of(power_domains,
> +						     struct drm_i915_private,
> +						     power_domains);
> +	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
> +			    power_domains->async_put_domains[1]);
>  }

Do we want to depend on struct i915_power_domains being a struct
drm_i915_private member via container_of?

BR,
Jani.

>  
>  static bool
>  __async_put_domains_state_ok(struct i915_power_domains *power_domains)
>  {
> +	struct drm_i915_private *i915 = container_of(power_domains,
> +						     struct drm_i915_private,
> +						     power_domains);
>  	enum intel_display_power_domain domain;
>  	bool err = false;
>  
>  	err |= !assert_async_put_domain_masks_disjoint(power_domains);
> -	err |= WARN_ON(!!power_domains->async_put_wakeref !=
> -		       !!__async_put_domains_mask(power_domains));
> +	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
> +			   !!__async_put_domains_mask(power_domains));
>  
>  	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
> -		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
> +		err |= drm_WARN_ON(&i915->drm,
> +				   power_domains->domain_use_count[domain] != 1);
>  
>  	return !err;
>  }
> @@ -2107,11 +2114,14 @@ static void
>  queue_async_put_domains_work(struct i915_power_domains *power_domains,
>  			     intel_wakeref_t wakeref)
>  {
> -	WARN_ON(power_domains->async_put_wakeref);
> +	struct drm_i915_private *i915 = container_of(power_domains,
> +						     struct drm_i915_private,
> +						     power_domains);
> +	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
>  	power_domains->async_put_wakeref = wakeref;
> -	WARN_ON(!queue_delayed_work(system_unbound_wq,
> -				    &power_domains->async_put_work,
> -				    msecs_to_jiffies(100)));
> +	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
> +						    &power_domains->async_put_work,
> +						    msecs_to_jiffies(100)));
>  }
>  
>  static void
> @@ -4318,6 +4328,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
>  		  const struct i915_power_well_desc *power_well_descs,
>  		  int power_well_count)
>  {
> +	struct drm_i915_private *i915 = container_of(power_domains,
> +						     struct drm_i915_private,
> +						     power_domains);
>  	u64 power_well_ids = 0;
>  	int i;
>  
> @@ -4337,8 +4350,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
>  		if (id == DISP_PW_ID_NONE)
>  			continue;
>  
> -		WARN_ON(id >= sizeof(power_well_ids) * 8);
> -		WARN_ON(power_well_ids & BIT_ULL(id));
> +		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
> +		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
>  		power_well_ids |= BIT_ULL(id);
>  	}
Imre Deak April 21, 2020, 9:19 a.m. UTC | #2
On Tue, Apr 21, 2020 at 10:53:12AM +0300, Jani Nikula wrote:
> 
> Pankaj, the subject line is identical to patch 4, please update.
> 
> Imre, one question inline for you.
> 
> On Mon, 06 Apr 2020, Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com> wrote:
> > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> > index 433e5a81dd4d..5475f989df4c 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> > @@ -1850,22 +1850,29 @@ static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
> >  static bool
> >  assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
> >  {
> > -	return !WARN_ON(power_domains->async_put_domains[0] &
> > -			power_domains->async_put_domains[1]);
> > +	struct drm_i915_private *i915 = container_of(power_domains,
> > +						     struct drm_i915_private,
> > +						     power_domains);
> > +	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
> > +			    power_domains->async_put_domains[1]);
> >  }
> 
> Do we want to depend on struct i915_power_domains being a struct
> drm_i915_private member via container_of?

It looks ok to me; there is only one i915_power_domains struct per
device.
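
For context, a simplified sketch of why container_of() is safe here: the
i915_power_domains struct is embedded by value in drm_i915_private, so the
member pointer always maps back to exactly one owning device. The struct
layout is trimmed and the helper name is only illustrative:

struct drm_i915_private {
	struct drm_device drm;
	/* ... */
	struct i915_power_domains power_domains;
	/* ... */
};

/* Hypothetical helper: recover the device from the embedded member. */
static struct drm_i915_private *
power_domains_to_i915(struct i915_power_domains *power_domains)
{
	return container_of(power_domains, struct drm_i915_private,
			    power_domains);
}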

> BR,
> Jani.
> 
> >  
> >  static bool
> >  __async_put_domains_state_ok(struct i915_power_domains *power_domains)
> >  {
> > +	struct drm_i915_private *i915 = container_of(power_domains,
> > +						     struct drm_i915_private,
> > +						     power_domains);
> >  	enum intel_display_power_domain domain;
> >  	bool err = false;
> >  
> >  	err |= !assert_async_put_domain_masks_disjoint(power_domains);
> > -	err |= WARN_ON(!!power_domains->async_put_wakeref !=
> > -		       !!__async_put_domains_mask(power_domains));
> > +	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
> > +			   !!__async_put_domains_mask(power_domains));
> >  
> >  	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
> > -		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
> > +		err |= drm_WARN_ON(&i915->drm,
> > +				   power_domains->domain_use_count[domain] != 1);
> >  
> >  	return !err;
> >  }
> > @@ -2107,11 +2114,14 @@ static void
> >  queue_async_put_domains_work(struct i915_power_domains *power_domains,
> >  			     intel_wakeref_t wakeref)
> >  {
> > -	WARN_ON(power_domains->async_put_wakeref);
> > +	struct drm_i915_private *i915 = container_of(power_domains,
> > +						     struct drm_i915_private,
> > +						     power_domains);
> > +	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
> >  	power_domains->async_put_wakeref = wakeref;
> > -	WARN_ON(!queue_delayed_work(system_unbound_wq,
> > -				    &power_domains->async_put_work,
> > -				    msecs_to_jiffies(100)));
> > +	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
> > +						    &power_domains->async_put_work,
> > +						    msecs_to_jiffies(100)));
> >  }
> >  
> >  static void
> > @@ -4318,6 +4328,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
> >  		  const struct i915_power_well_desc *power_well_descs,
> >  		  int power_well_count)
> >  {
> > +	struct drm_i915_private *i915 = container_of(power_domains,
> > +						     struct drm_i915_private,
> > +						     power_domains);
> >  	u64 power_well_ids = 0;
> >  	int i;
> >  
> > @@ -4337,8 +4350,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
> >  		if (id == DISP_PW_ID_NONE)
> >  			continue;
> >  
> > -		WARN_ON(id >= sizeof(power_well_ids) * 8);
> > -		WARN_ON(power_well_ids & BIT_ULL(id));
> > +		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
> > +		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
> >  		power_well_ids |= BIT_ULL(id);
> >  	}
> 
> -- 
> Jani Nikula, Intel Open Source Graphics Center

Patch

diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 433e5a81dd4d..5475f989df4c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1850,22 +1850,29 @@  static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
 static bool
 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
 {
-	return !WARN_ON(power_domains->async_put_domains[0] &
-			power_domains->async_put_domains[1]);
+	struct drm_i915_private *i915 = container_of(power_domains,
+						     struct drm_i915_private,
+						     power_domains);
+	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
+			    power_domains->async_put_domains[1]);
 }
 
 static bool
 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
 {
+	struct drm_i915_private *i915 = container_of(power_domains,
+						     struct drm_i915_private,
+						     power_domains);
 	enum intel_display_power_domain domain;
 	bool err = false;
 
 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
-	err |= WARN_ON(!!power_domains->async_put_wakeref !=
-		       !!__async_put_domains_mask(power_domains));
+	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
+			   !!__async_put_domains_mask(power_domains));
 
 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
-		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+		err |= drm_WARN_ON(&i915->drm,
+				   power_domains->domain_use_count[domain] != 1);
 
 	return !err;
 }
@@ -2107,11 +2114,14 @@  static void
 queue_async_put_domains_work(struct i915_power_domains *power_domains,
 			     intel_wakeref_t wakeref)
 {
-	WARN_ON(power_domains->async_put_wakeref);
+	struct drm_i915_private *i915 = container_of(power_domains,
+						     struct drm_i915_private,
+						     power_domains);
+	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
 	power_domains->async_put_wakeref = wakeref;
-	WARN_ON(!queue_delayed_work(system_unbound_wq,
-				    &power_domains->async_put_work,
-				    msecs_to_jiffies(100)));
+	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
+						    &power_domains->async_put_work,
+						    msecs_to_jiffies(100)));
 }
 
 static void
@@ -4318,6 +4328,9 @@  __set_power_wells(struct i915_power_domains *power_domains,
 		  const struct i915_power_well_desc *power_well_descs,
 		  int power_well_count)
 {
+	struct drm_i915_private *i915 = container_of(power_domains,
+						     struct drm_i915_private,
+						     power_domains);
 	u64 power_well_ids = 0;
 	int i;
 
@@ -4337,8 +4350,8 @@  __set_power_wells(struct i915_power_domains *power_domains,
 		if (id == DISP_PW_ID_NONE)
 			continue;
 
-		WARN_ON(id >= sizeof(power_well_ids) * 8);
-		WARN_ON(power_well_ids & BIT_ULL(id));
+		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
+		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
 		power_well_ids |= BIT_ULL(id);
 	}