@@ -3906,6 +3906,14 @@ static int __clk_core_init(struct clk_core *core)
unsigned long rate;
int phase;
+ /*
+ * When reparenting the orphans later on, it will be too late to resume
+ * the children that will get their rates recalculated.
+ */
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+
clk_prepare_lock();
/*
@@ -3916,10 +3924,6 @@ static int __clk_core_init(struct clk_core *core)
*/
core->hw->core = core;
- ret = clk_pm_runtime_get(core);
- if (ret)
- goto unlock;
-
/* check to see if a clock with this name is already registered */
if (clk_core_lookup(core->name)) {
pr_debug("%s: clk %s already initialized\n",
@@ -4082,8 +4086,6 @@ static int __clk_core_init(struct clk_core *core)
clk_core_reparent_orphans_nolock();
out:
- clk_pm_runtime_put(core);
-unlock:
if (ret) {
hlist_del_init(&core->child_node);
core->hw->core = NULL;
@@ -4091,6 +4093,8 @@ static int __clk_core_init(struct clk_core *core)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
if (!ret)
clk_debug_register(core);
In order to fix the ABBA locking situation between clock and power
domains, let's disentangle these two locks by preventing any runtime PM
call from happening with the clk prepare_lock mutex acquired. The
__clk_core_init() routine shall preferably call the runtime PM functions
before acquiring the prepare lock, which can be achieved with the
existing clk_pm_runtime_get_all() helper. We have no choice but to wake
up all clocks at this stage because we may reparent any orphan clock in
the system, and thus need all of its parents and children to be resumed.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
 drivers/clk/clk.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
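
To visualize the locking issue being fixed, here is a minimal, stand-alone
user-space sketch, not kernel code: pthread mutexes stand in for the clk
prepare lock and for a power-domain lock taken on the runtime PM resume
path, and the function names clk_register_old(), clk_register_new() and
genpd_power_on() are hypothetical. The old flow takes the two locks in
opposite orders on the two sides (ABBA); the new flow resumes everything
first, as clk_pm_runtime_get_all() does in the patch, so the prepare lock
is never held while the power-domain lock is taken.

/*
 * Stand-alone illustration of the ABBA ordering, not kernel code.
 * "prepare_lock" mimics the clk prepare mutex, "genpd_lock" mimics a
 * power-domain lock taken by runtime PM resume. All helpers are made up.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prepare_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t genpd_lock   = PTHREAD_MUTEX_INITIALIZER;

/* Old flow: runtime PM resume (genpd_lock) happens under prepare_lock. */
static void clk_register_old(void)
{
	pthread_mutex_lock(&prepare_lock);	/* A */
	pthread_mutex_lock(&genpd_lock);	/* then B: A->B ordering */
	/* ... initialize the clock, reparent orphans ... */
	pthread_mutex_unlock(&genpd_lock);
	pthread_mutex_unlock(&prepare_lock);
}

/* Power-domain side: takes its own lock, then needs the clk lock. */
static void genpd_power_on(void)
{
	pthread_mutex_lock(&genpd_lock);	/* B */
	pthread_mutex_lock(&prepare_lock);	/* then A: B->A, hence ABBA */
	/* ... enable the domain's clocks ... */
	pthread_mutex_unlock(&prepare_lock);
	pthread_mutex_unlock(&genpd_lock);
}

/* New flow: resume every device first, then take prepare_lock alone. */
static void clk_register_new(void)
{
	pthread_mutex_lock(&genpd_lock);	/* clk_pm_runtime_get_all() */
	pthread_mutex_unlock(&genpd_lock);

	pthread_mutex_lock(&prepare_lock);
	/* ... initialize the clock, reparent orphans, no runtime PM here ... */
	pthread_mutex_unlock(&prepare_lock);

	pthread_mutex_lock(&genpd_lock);	/* clk_pm_runtime_put_all() */
	pthread_mutex_unlock(&genpd_lock);
}

int main(void)
{
	/* Single-threaded, so nothing blocks; only the ordering matters. */
	clk_register_old();
	clk_register_new();
	genpd_power_on();
	printf("done\n");
	return 0;
}

Built with "cc -pthread", this runs single-threaded and never actually
deadlocks; it only shows the acquisition orders described in the comments.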