
[1/2] clk: Fix the CLK_IGNORE_UNUSED failure issue

Message ID 20240929-fix_glitch_free-v1-1-22f9c36b7edf@amlogic.com (mailing list archive)
State New, archived
Series clk: Fix issues related to CLK_IGNORE_UNUSED failures and amlogic glitch free mux

Commit Message

Chuan Liu via B4 Relay Sept. 29, 2024, 6:10 a.m. UTC
From: Chuan Liu <chuan.liu@amlogic.com>

When clk_disable_unused_subtree() disables an unused clock that has
CLK_OPS_PARENT_ENABLE set, it calls clk_core_prepare_enable() and
clk_core_disable_unprepare() directly on the clock's parent. Neither
function checks CLK_IGNORE_UNUSED, so the parent clock is gated by
clk_core_disable_unprepare() even when it is marked CLK_IGNORE_UNUSED.

Add two new functions, clk_prepare_enable_unused() and
clk_disable_unprepare_unused(), to handle this case. Both take
CLK_IGNORE_UNUSED into account so that a parent clock carrying this
flag is not inadvertently disabled.
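
For illustration, a minimal sketch of the current path (paraphrased from
the clk_disable_unused_subtree() context visible in the diff below, not
verbatim mainline code):

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	/* ... children are handled first ... */

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);    /* parent counts 0 -> 1 */

	/* core itself is skipped here if it carries CLK_IGNORE_UNUSED ... */

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent); /* parent counts 1 -> 0, gated */
}

The parent's own CLK_IGNORE_UNUSED flag is never consulted on this path,
so a parent left running by the bootloader ends up gated anyway.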

Change-Id: I56943e17b86436254f07d9b8cdbc35599328d519
Signed-off-by: Chuan Liu <chuan.liu@amlogic.com>
---
 drivers/clk/clk.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 65 insertions(+), 2 deletions(-)

Comments

Jerome Brunet Sept. 30, 2024, 12:27 p.m. UTC | #1
On Sun 29 Sep 2024 at 14:10, Chuan Liu via B4 Relay <devnull+chuan.liu.amlogic.com@kernel.org> wrote:

> From: Chuan Liu <chuan.liu@amlogic.com>
>
> When clk_disable_unused_subtree() disables an unused clock that has
> CLK_OPS_PARENT_ENABLE set, it calls clk_core_prepare_enable() and
> clk_core_disable_unprepare() directly on the clock's parent. Neither
> function checks CLK_IGNORE_UNUSED, so the parent clock is gated by
> clk_core_disable_unprepare() even when it is marked CLK_IGNORE_UNUSED.
>
> Add two new functions, clk_prepare_enable_unused() and
> clk_disable_unprepare_unused(), to handle this case. Both take
> CLK_IGNORE_UNUSED into account so that a parent clock carrying this
> flag is not inadvertently disabled.
>
> Change-Id: I56943e17b86436254f07d9b8cdbc35599328d519
> Signed-off-by: Chuan Liu <chuan.liu@amlogic.com>
> ---
>  drivers/clk/clk.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 65 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 285ed1ad8a37..5d3316699b57 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -94,6 +94,7 @@ struct clk_core {
>  	struct hlist_node	debug_node;
>  #endif
>  	struct kref		ref;
> +	bool			ignore_enabled;
>  };
>  
>  #define CREATE_TRACE_POINTS
> @@ -1479,6 +1480,68 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
>  	}
>  }
>  
> +static void __init clk_disable_unprepare_unused(struct clk_core *core)
> +{
> +	unsigned long flags;
> +
> +	lockdep_assert_held(&prepare_lock);
> +
> +	if (!core)
> +		return;
> +
> +	if ((core->enable_count == 0) && core->ops->disable &&
> +	    !core->ignore_enabled) {
> +		flags = clk_enable_lock();

Used core->enable_count without taking the lock
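
For comparison, a paraphrased sketch of how the existing subtree walk
reads enable_count only after taking the enable lock (details may
differ):

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;
	/* ... */
unlock_out:
	clk_enable_unlock(flags);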

> +		core->ops->disable(core->hw);

If there is any CLK_IS_CRITICAL clock in the path, it is game over.
You've basically disregarded all the other CCF flags, which are just as
important as the one you are dealing with.

> +		clk_enable_unlock(flags);
> +	}
> +
> +	if ((core->prepare_count == 0) && core->ops->unprepare &&
> +	    !core->ignore_enabled)
> +		core->ops->unprepare(core->hw);
> +
> +	core->ignore_enabled = false;
> +
> +	clk_disable_unprepare_unused(core->parent);

Here you are disabling the parent of any CLK_IGNORE_UNUSED clock.
IMO, the problem is not solved. It just shifted.
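
A hypothetical topology (names assumed purely for illustration) makes
the shift concrete:

/*
 *   pll ---- mux_a (CLK_IGNORE_UNUSED, left running by the bootloader)
 *              `--- gate_b (CLK_OPS_PARENT_ENABLE, unused)
 *
 * clk_disable_unused_subtree(gate_b) ends in
 * clk_disable_unprepare_unused(mux_a): mux_a itself is skipped because
 * ignore_enabled is set, but the recursion continues upwards:
 */
clk_disable_unprepare_unused(core->parent);	/* gates "pll" if its enable_count is 0 */
/* mux_a is nominally left alone, yet its input "pll" is now off. */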

> +}
> +
> +static int __init clk_prepare_enable_unused(struct clk_core *core)
> +{
> +	int ret = 0;
> +	unsigned long flags;
> +
> +	lockdep_assert_held(&prepare_lock);
> +
> +	if (!core)
> +		return 0;
> +
> +	ret = clk_prepare_enable_unused(core->parent);
> +	if (ret)
> +		return ret;

That's adding another recursion in CCF, something Stephen would like to remove

> +
> +	if ((core->flags & CLK_IGNORE_UNUSED) && clk_core_is_enabled(core))
> +		core->ignore_enabled = true;
> +
> +	if ((core->prepare_count == 0) && core->ops->prepare) {
> +		ret = core->ops->prepare(core->hw);
> +		if (ret)
> +			goto disable_unprepare;
> +	}
> +
> +	if ((core->enable_count == 0) && core->ops->enable) {
> +		flags = clk_enable_lock();
> +		ret = core->ops->enable(core->hw);
> +		clk_enable_unlock(flags);
> +		if (ret)
> +			goto disable_unprepare;
> +	}
> +
> +	return 0;
> +disable_unprepare:
> +	clk_disable_unprepare_unused(core->parent);
> +	return ret;
> +}
> +
>  static void __init clk_disable_unused_subtree(struct clk_core *core)
>  {
>  	struct clk_core *child;
> @@ -1490,7 +1553,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
>  		clk_disable_unused_subtree(child);
>  
>  	if (core->flags & CLK_OPS_PARENT_ENABLE)
> -		clk_core_prepare_enable(core->parent);
> +		clk_prepare_enable_unused(core->parent);
>  
>  	flags = clk_enable_lock();
>  
> @@ -1517,7 +1580,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
>  unlock_out:
>  	clk_enable_unlock(flags);
>  	if (core->flags & CLK_OPS_PARENT_ENABLE)
> -		clk_core_disable_unprepare(core->parent);
> +		clk_disable_unprepare_unused(core->parent);
>  }
>  
>  static bool clk_ignore_unused __initdata;
Chuan Liu Nov. 8, 2024, 1:02 p.m. UTC | #2
Hi Jerome:

     Thanks for your suggestion.


On 9/30/2024 8:27 PM, Jerome Brunet wrote:
>
> On Sun 29 Sep 2024 at 14:10, Chuan Liu via B4 Relay <devnull+chuan.liu.amlogic.com@kernel.org> wrote:
>
>> From: Chuan Liu <chuan.liu@amlogic.com>
>>
>> When clk_disable_unused_subtree() disables an unused clock that has
>> CLK_OPS_PARENT_ENABLE set, it calls clk_core_prepare_enable() and
>> clk_core_disable_unprepare() directly on the clock's parent. Neither
>> function checks CLK_IGNORE_UNUSED, so the parent clock is gated by
>> clk_core_disable_unprepare() even when it is marked CLK_IGNORE_UNUSED.
>>
>> Add two new functions, clk_prepare_enable_unused() and
>> clk_disable_unprepare_unused(), to handle this case. Both take
>> CLK_IGNORE_UNUSED into account so that a parent clock carrying this
>> flag is not inadvertently disabled.
>>
>> Change-Id: I56943e17b86436254f07d9b8cdbc35599328d519
>> Signed-off-by: Chuan Liu <chuan.liu@amlogic.com>
>> ---
>>   drivers/clk/clk.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
>>   1 file changed, 65 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
>> index 285ed1ad8a37..5d3316699b57 100644
>> --- a/drivers/clk/clk.c
>> +++ b/drivers/clk/clk.c
>> @@ -94,6 +94,7 @@ struct clk_core {
>>        struct hlist_node       debug_node;
>>   #endif
>>        struct kref             ref;
>> +     bool                    ignore_enabled;
>>   };
>>
>>   #define CREATE_TRACE_POINTS
>> @@ -1479,6 +1480,68 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
>>        }
>>   }
>>
>> +static void __init clk_disable_unprepare_unused(struct clk_core *core)
>> +{
>> +     unsigned long flags;
>> +
>> +     lockdep_assert_held(&prepare_lock);
>> +
>> +     if (!core)
>> +             return;
>> +
>> +     if ((core->enable_count == 0) && core->ops->disable &&
>> +         !core->ignore_enabled) {
>> +             flags = clk_enable_lock();
> Used core->enable_count without taking the lock

My understanding is that the spinlock is taken here to ensure that
disabling the clock completes without interference.

>
>> +             core->ops->disable(core->hw);
> If there is any CLK_IS_CRITICAL clock in the path, it is game over.
> You've basically disregarded all the other CCF flags, which are just as
> important as the one you are dealing with.

If a clock is CLK_IS_CRITICAL, its enable_count is greater than 0, so
it does not enter the 'if'.
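
(Critical clocks get their counts bumped at registration time; a
paraphrased sketch of the CLK_IS_CRITICAL handling in __clk_core_init(),
details may vary across kernel versions:)

	if (core->flags & CLK_IS_CRITICAL) {
		ret = clk_core_prepare(core);		/* prepare_count becomes 1 */
		if (ret)
			goto out;

		ret = clk_core_enable_lock(core);	/* enable_count becomes 1 */
		if (ret) {
			clk_core_unprepare(core);
			goto out;
		}
	}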

>
>> +             clk_enable_unlock(flags);
>> +     }
>> +
>> +     if ((core->prepare_count == 0) && core->ops->unprepare &&
>> +         !core->ignore_enabled)
>> +             core->ops->unprepare(core->hw);
>> +
>> +     core->ignore_enabled = false;
>> +
>> +     clk_disable_unprepare_unused(core->parent);
> Here you are disabling the parent of any CLK_IGNORE_UNUSED clock.
> IMO, the problem is not solved. It just shifted.

Yes, it does not take into account the case where the parent is not
configured with CLK_IGNORE_UNUSED and therefore still gets disabled.

>
>> +}
>> +
>> +static int __init clk_prepare_enable_unused(struct clk_core *core)
>> +{
>> +     int ret = 0;
>> +     unsigned long flags;
>> +
>> +     lockdep_assert_held(&prepare_lock);
>> +
>> +     if (!core)
>> +             return 0;
>> +
>> +     ret = clk_prepare_enable_unused(core->parent);
>> +     if (ret)
>> +             return ret;
> That's adding another recursion in CCF, something Stephen would like to remove

This patch is mainly meant to put forward an idea and draw attention to
the problem that was discovered.

>> +
>> +     if ((core->flags & CLK_IGNORE_UNUSED) && clk_core_is_enabled(core))
>> +             core->ignore_enabled = true;
>> +
>> +     if ((core->prepare_count == 0) && core->ops->prepare) {
>> +             ret = core->ops->prepare(core->hw);
>> +             if (ret)
>> +                     goto disable_unprepare;
>> +     }
>> +
>> +     if ((core->enable_count == 0) && core->ops->enable) {
>> +             flags = clk_enable_lock();
>> +             ret = core->ops->enable(core->hw);
>> +             clk_enable_unlock(flags);
>> +             if (ret)
>> +                     goto disable_unprepare;
>> +     }
>> +
>> +     return 0;
>> +disable_unprepare:
>> +     clk_disable_unprepare_unused(core->parent);
>> +     return ret;
>> +}
>> +
>>   static void __init clk_disable_unused_subtree(struct clk_core *core)
>>   {
>>        struct clk_core *child;
>> @@ -1490,7 +1553,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
>>                clk_disable_unused_subtree(child);
>>
>>        if (core->flags & CLK_OPS_PARENT_ENABLE)
>> -             clk_core_prepare_enable(core->parent);
>> +             clk_prepare_enable_unused(core->parent);
>>
>>        flags = clk_enable_lock();
>>
>> @@ -1517,7 +1580,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
>>   unlock_out:
>>        clk_enable_unlock(flags);
>>        if (core->flags & CLK_OPS_PARENT_ENABLE)
>> -             clk_core_disable_unprepare(core->parent);
>> +             clk_disable_unprepare_unused(core->parent);
>>   }
>>
>>   static bool clk_ignore_unused __initdata;
> --
> Jerome

Patch

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 285ed1ad8a37..5d3316699b57 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -94,6 +94,7 @@  struct clk_core {
 	struct hlist_node	debug_node;
 #endif
 	struct kref		ref;
+	bool			ignore_enabled;
 };
 
 #define CREATE_TRACE_POINTS
@@ -1479,6 +1480,68 @@  static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 	}
 }
 
+static void __init clk_disable_unprepare_unused(struct clk_core *core)
+{
+	unsigned long flags;
+
+	lockdep_assert_held(&prepare_lock);
+
+	if (!core)
+		return;
+
+	if ((core->enable_count == 0) && core->ops->disable &&
+	    !core->ignore_enabled) {
+		flags = clk_enable_lock();
+		core->ops->disable(core->hw);
+		clk_enable_unlock(flags);
+	}
+
+	if ((core->prepare_count == 0) && core->ops->unprepare &&
+	    !core->ignore_enabled)
+		core->ops->unprepare(core->hw);
+
+	core->ignore_enabled = false;
+
+	clk_disable_unprepare_unused(core->parent);
+}
+
+static int __init clk_prepare_enable_unused(struct clk_core *core)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	lockdep_assert_held(&prepare_lock);
+
+	if (!core)
+		return 0;
+
+	ret = clk_prepare_enable_unused(core->parent);
+	if (ret)
+		return ret;
+
+	if ((core->flags & CLK_IGNORE_UNUSED) && clk_core_is_enabled(core))
+		core->ignore_enabled = true;
+
+	if ((core->prepare_count == 0) && core->ops->prepare) {
+		ret = core->ops->prepare(core->hw);
+		if (ret)
+			goto disable_unprepare;
+	}
+
+	if ((core->enable_count == 0) && core->ops->enable) {
+		flags = clk_enable_lock();
+		ret = core->ops->enable(core->hw);
+		clk_enable_unlock(flags);
+		if (ret)
+			goto disable_unprepare;
+	}
+
+	return 0;
+disable_unprepare:
+	clk_disable_unprepare_unused(core->parent);
+	return ret;
+}
+
 static void __init clk_disable_unused_subtree(struct clk_core *core)
 {
 	struct clk_core *child;
@@ -1490,7 +1553,7 @@  static void __init clk_disable_unused_subtree(struct clk_core *core)
 		clk_disable_unused_subtree(child);
 
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
-		clk_core_prepare_enable(core->parent);
+		clk_prepare_enable_unused(core->parent);
 
 	flags = clk_enable_lock();
 
@@ -1517,7 +1580,7 @@  static void __init clk_disable_unused_subtree(struct clk_core *core)
 unlock_out:
 	clk_enable_unlock(flags);
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
-		clk_core_disable_unprepare(core->parent);
+		clk_disable_unprepare_unused(core->parent);
 }
 
 static bool clk_ignore_unused __initdata;