
[02/13] drm/i915/dmc_wl: Use non-sleeping variant of MMIO wait

Message ID 20241021222744.294371-3-gustavo.sousa@intel.com (mailing list archive)
State New
Series drm/i915/dmc_wl: Fixes and enablement for Xe3_LPD

Commit Message

Gustavo Sousa Oct. 21, 2024, 10:27 p.m. UTC
Some display MMIO transactions for offsets in the range that requires
the DMC wakelock happen in atomic context (this has been confirmed
during tests on PTL). That means we need to use a non-sleeping
variant of the MMIO waiting function.

Implement __intel_de_wait_for_register_atomic_nowl() and use it when
waiting for acknowledgment of acquire/release.

Signed-off-by: Gustavo Sousa <gustavo.sousa@intel.com>
---
 drivers/gpu/drm/i915/display/intel_de.h     | 11 +++++++++++
 drivers/gpu/drm/i915/display/intel_dmc_wl.c | 20 ++++++++++++--------
 2 files changed, 23 insertions(+), 8 deletions(-)
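
For reference, a before/after usage sketch of the acquire-side wait (not part
of the patch), assuming the usual i915 uncore behavior where
intel_wait_for_register() takes a millisecond timeout and may sleep, while
__intel_wait_for_register() with a zero slow timeout only busy-polls:

	/*
	 * Before: the _nowl helper forwards to intel_wait_for_register(),
	 * whose timeout is in milliseconds and which may sleep, so it is
	 * unsafe in atomic context.
	 */
	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
					      DMC_WAKELOCK_CTL_ACK,
					      DMC_WAKELOCK_CTL_ACK,
					      DMC_WAKELOCK_CTL_TIMEOUT)) /* 5 ms */
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");

	/*
	 * After: the atomic variant forwards to __intel_wait_for_register()
	 * with a zero slow timeout, so it only busy-polls for the given
	 * number of microseconds and never sleeps.
	 */
	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) /* 5000 us */
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");

This is also why DMC_WAKELOCK_CTL_TIMEOUT (5, in ms) becomes
DMC_WAKELOCK_CTL_TIMEOUT_US (5000, in us) in the dmc_wl.c hunks below.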

Comments

Jani Nikula Oct. 22, 2024, 9:34 a.m. UTC | #1
On Mon, 21 Oct 2024, Gustavo Sousa <gustavo.sousa@intel.com> wrote:
> Some display MMIO transactions for offsets in the range that requires
> the DMC wakelock happen in atomic context (this has been confirmed
> during tests on PTL). That means that we need to use a non-sleeping
> variant of MMIO waiting function.
>
> Implement __intel_de_wait_for_register_atomic_nowl() and use it when
> waiting for acknowledgment of acquire/release.
>
> Signed-off-by: Gustavo Sousa <gustavo.sousa@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_de.h     | 11 +++++++++++
>  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 20 ++++++++++++--------
>  2 files changed, 23 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
> index e017cd4a8168..4116783a62dd 100644
> --- a/drivers/gpu/drm/i915/display/intel_de.h
> +++ b/drivers/gpu/drm/i915/display/intel_de.h
> @@ -121,6 +121,17 @@ ____intel_de_wait_for_register_nowl(struct intel_display *display,
>  }
>  #define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
>  
> +static inline int
> +____intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
> +					   i915_reg_t reg,
> +					   u32 mask, u32 value,
> +					   unsigned int fast_timeout_us)
> +{
> +	return __intel_wait_for_register(__to_uncore(display), reg, mask,
> +					 value, fast_timeout_us, 0, NULL);
> +}
> +#define __intel_de_wait_for_register_atomic_nowl(p,...) ____intel_de_wait_for_register_atomic_nowl(__to_intel_display(p), __VA_ARGS__)
> +

There's no need to add the wrapper when all users pass struct
intel_display. And we don't want new users that pass i915.

And why are we adding new stuff and users with double underscores?
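
For illustration only (a sketch of what the suggested simplification could
look like, not code from the thread): dropping the __to_intel_display()
wrapper macro leaves a single helper that takes struct intel_display
directly, e.g.:

static inline int
__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
					 i915_reg_t reg,
					 u32 mask, u32 value,
					 unsigned int fast_timeout_us)
{
	/* Zero slow timeout: busy-poll only, no sleeping wait. */
	return __intel_wait_for_register(__to_uncore(display), reg, mask,
					 value, fast_timeout_us, 0, NULL);
}

Gustavo's reply below addresses whether the double-underscore prefix should
stay.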

>  static inline int
>  __intel_de_wait(struct intel_display *display, i915_reg_t reg,
>  		u32 mask, u32 value, unsigned int timeout)
> diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
> index 5634ff07269d..8056a3c8666c 100644
> --- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
> +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
> @@ -39,7 +39,7 @@
>   * potential future use.
>   */
>  
> -#define DMC_WAKELOCK_CTL_TIMEOUT 5
> +#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
>  #define DMC_WAKELOCK_HOLD_TIME 50
>  
>  struct intel_dmc_wl_range {
> @@ -78,9 +78,9 @@ static void intel_dmc_wl_work(struct work_struct *work)
>  
>  	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
>  
> -	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
> -					      DMC_WAKELOCK_CTL_ACK, 0,
> -					      DMC_WAKELOCK_CTL_TIMEOUT)) {
> +	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
> +						     DMC_WAKELOCK_CTL_ACK, 0,
> +						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
>  		WARN_RATELIMIT(1, "DMC wakelock release timed out");
>  		goto out_unlock;
>  	}
> @@ -216,10 +216,14 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
>  		__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
>  				    DMC_WAKELOCK_CTL_REQ);
>  
> -		if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
> -						      DMC_WAKELOCK_CTL_ACK,
> -						      DMC_WAKELOCK_CTL_ACK,
> -						      DMC_WAKELOCK_CTL_TIMEOUT)) {
> +		/*
> +		 * We need to use the atomic variant of the waiting routine
> +		 * because the DMC wakelock is also taken in atomic context.
> +		 */
> +		if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
> +							     DMC_WAKELOCK_CTL_ACK,
> +							     DMC_WAKELOCK_CTL_ACK,
> +							     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
>  			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
>  			goto out_unlock;
>  		}
Gustavo Sousa Oct. 22, 2024, 10:55 a.m. UTC | #2
Quoting Jani Nikula (2024-10-22 06:34:44-03:00)
>On Mon, 21 Oct 2024, Gustavo Sousa <gustavo.sousa@intel.com> wrote:
>> Some display MMIO transactions for offsets in the range that requires
>> the DMC wakelock happen in atomic context (this has been confirmed
>> during tests on PTL). That means that we need to use a non-sleeping
>> variant of MMIO waiting function.
>>
>> Implement __intel_de_wait_for_register_atomic_nowl() and use it when
>> waiting for acknowledgment of acquire/release.
>>
>> Signed-off-by: Gustavo Sousa <gustavo.sousa@intel.com>
>> ---
>>  drivers/gpu/drm/i915/display/intel_de.h     | 11 +++++++++++
>>  drivers/gpu/drm/i915/display/intel_dmc_wl.c | 20 ++++++++++++--------
>>  2 files changed, 23 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
>> index e017cd4a8168..4116783a62dd 100644
>> --- a/drivers/gpu/drm/i915/display/intel_de.h
>> +++ b/drivers/gpu/drm/i915/display/intel_de.h
>> @@ -121,6 +121,17 @@ ____intel_de_wait_for_register_nowl(struct intel_display *display,
>>  }
>>  #define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
>>  
>> +static inline int
>> +____intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
>> +                                           i915_reg_t reg,
>> +                                           u32 mask, u32 value,
>> +                                           unsigned int fast_timeout_us)
>> +{
>> +        return __intel_wait_for_register(__to_uncore(display), reg, mask,
>> +                                         value, fast_timeout_us, 0, NULL);
>> +}
>> +#define __intel_de_wait_for_register_atomic_nowl(p,...) ____intel_de_wait_for_register_atomic_nowl(__to_intel_display(p), __VA_ARGS__)
>> +
>
>There's no need to add the wrapper when all users pass struct
>intel_display. And we don't want new users that pass i915.

Ah, okay. Thanks.

>
>And why are we adding new stuff and users with double underscores?

Well, this is a no-wakelock variant and it shouldn't be used broadly. I
believe that was the motivation for prefixing all "__intel_de_*nowl"
variants with the underscores.

--
Gustavo Sousa
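
As a rough illustration of that convention (a sketch under the assumption
that the plain intel_de_*() helpers take and release the DMC wakelock around
each MMIO access; names follow intel_de.h but this is not verbatim code):

static inline u32 intel_de_read(struct intel_display *display, i915_reg_t reg)
{
	u32 val;

	/* Public helpers grab the DMC wakelock around the MMIO access... */
	intel_dmc_wl_get(display, reg);
	val = intel_uncore_read(__to_uncore(display), reg);
	intel_dmc_wl_put(display, reg);

	return val;
}

/*
 * ...so the wakelock code itself has to touch DMC_WAKELOCK1_CTL through the
 * underscore-prefixed "_nowl" variants, which skip the wakelock and are not
 * meant for general use.
 */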

>
>>  static inline int
>>  __intel_de_wait(struct intel_display *display, i915_reg_t reg,
>>                  u32 mask, u32 value, unsigned int timeout)
>> diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
>> index 5634ff07269d..8056a3c8666c 100644
>> --- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
>> +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
>> @@ -39,7 +39,7 @@
>>   * potential future use.
>>   */
>>  
>> -#define DMC_WAKELOCK_CTL_TIMEOUT 5
>> +#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
>>  #define DMC_WAKELOCK_HOLD_TIME 50
>>  
>>  struct intel_dmc_wl_range {
>> @@ -78,9 +78,9 @@ static void intel_dmc_wl_work(struct work_struct *work)
>>  
>>          __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
>>  
>> -        if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
>> -                                              DMC_WAKELOCK_CTL_ACK, 0,
>> -                                              DMC_WAKELOCK_CTL_TIMEOUT)) {
>> +        if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
>> +                                                     DMC_WAKELOCK_CTL_ACK, 0,
>> +                                                     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
>>                  WARN_RATELIMIT(1, "DMC wakelock release timed out");
>>                  goto out_unlock;
>>          }
>> @@ -216,10 +216,14 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
>>                  __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
>>                                      DMC_WAKELOCK_CTL_REQ);
>>  
>> -                if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
>> -                                                      DMC_WAKELOCK_CTL_ACK,
>> -                                                      DMC_WAKELOCK_CTL_ACK,
>> -                                                      DMC_WAKELOCK_CTL_TIMEOUT)) {
>> +                /*
>> +                 * We need to use the atomic variant of the waiting routine
>> +                 * because the DMC wakelock is also taken in atomic context.
>> +                 */
>> +                if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
>> +                                                             DMC_WAKELOCK_CTL_ACK,
>> +                                                             DMC_WAKELOCK_CTL_ACK,
>> +                                                             DMC_WAKELOCK_CTL_TIMEOUT_US)) {
>>                          WARN_RATELIMIT(1, "DMC wakelock ack timed out");
>>                          goto out_unlock;
>>                  }
>
>-- 
>Jani Nikula, Intel

Patch

diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index e017cd4a8168..4116783a62dd 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -121,6 +121,17 @@  ____intel_de_wait_for_register_nowl(struct intel_display *display,
 }
 #define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
 
+static inline int
+____intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
+					   i915_reg_t reg,
+					   u32 mask, u32 value,
+					   unsigned int fast_timeout_us)
+{
+	return __intel_wait_for_register(__to_uncore(display), reg, mask,
+					 value, fast_timeout_us, 0, NULL);
+}
+#define __intel_de_wait_for_register_atomic_nowl(p,...) ____intel_de_wait_for_register_atomic_nowl(__to_intel_display(p), __VA_ARGS__)
+
 static inline int
 __intel_de_wait(struct intel_display *display, i915_reg_t reg,
 		u32 mask, u32 value, unsigned int timeout)
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index 5634ff07269d..8056a3c8666c 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -39,7 +39,7 @@ 
  * potential future use.
  */
 
-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
 #define DMC_WAKELOCK_HOLD_TIME 50
 
 struct intel_dmc_wl_range {
@@ -78,9 +78,9 @@  static void intel_dmc_wl_work(struct work_struct *work)
 
 	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
 
-	if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
-					      DMC_WAKELOCK_CTL_ACK, 0,
-					      DMC_WAKELOCK_CTL_TIMEOUT)) {
+	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+						     DMC_WAKELOCK_CTL_ACK, 0,
+						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
 		WARN_RATELIMIT(1, "DMC wakelock release timed out");
 		goto out_unlock;
 	}
@@ -216,10 +216,14 @@  void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
 		__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
 				    DMC_WAKELOCK_CTL_REQ);
 
-		if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
-						      DMC_WAKELOCK_CTL_ACK,
-						      DMC_WAKELOCK_CTL_ACK,
-						      DMC_WAKELOCK_CTL_TIMEOUT)) {
+		/*
+		 * We need to use the atomic variant of the waiting routine
+		 * because the DMC wakelock is also taken in atomic context.
+		 */
+		if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+							     DMC_WAKELOCK_CTL_ACK,
+							     DMC_WAKELOCK_CTL_ACK,
+							     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
 			WARN_RATELIMIT(1, "DMC wakelock ack timed out");
 			goto out_unlock;
 		}