
[RFC,09/10] clk: Make sure clock parents and children are resumed when necessary

Message ID 20250326-cross-lock-dep-v1-9-3199e49e8652@bootlin.com (mailing list archive)
State New
Series Fix the ABBA locking situation between clk and runtime PM

Commit Message

Miquel Raynal March 26, 2025, 6:26 p.m. UTC
Any pm_runtime_get() call wakes up both the core clock and its
parents. But some cases also require resuming the child clocks. One
way to do that is to use the new pm_runtime_get_consumers() helper.

It's been identified that the following situations may require resuming
the children:
- getting the rate
- setting the rate
- changing the parent (especially since it may produce rate changes)
- putting the clock, which may involve reparenting as well

In order to fix the ABBA locking situation between clocks and power
domains, let's decouple these two locks by resuming the children
outside of the prepare_lock, in a single call, using this new
helper.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
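For reference, a minimal sketch of the ordering every converted entry
point follows after this patch. It is illustrative only, not an extra
hunk, and assumes the pm_runtime_get_consumers()/pm_runtime_put_consumers()
helpers introduced earlier in this series:

	/* take runtime PM references (clock + its consumers) first */
	ret = clk_pm_runtime_get_and_consumers(clk->core);
	if (ret)
		return ret;

	clk_prepare_lock();
	/* rate/parent operation done while holding the prepare lock */
	clk_prepare_unlock();

	/* drop the references only after the prepare lock is released */
	clk_pm_runtime_put_and_consumers(clk->core);

Runtime PM references, including the clock's consumers, are taken
before prepare_lock is acquired and dropped only after it is released,
so a runtime resume (and the power-domain locks it takes) never happens
underneath prepare_lock.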
 drivers/clk/clk.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)

Patch

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 339ebfa8cca729ffb84127e01a21f741bc270cb3..26af3a134fa7b9d7f4a77ff473df7e79fd465789 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -119,6 +119,20 @@  static int clk_pm_runtime_get(struct clk_core *core)
 	return pm_runtime_resume_and_get(core->dev);
 }
 
+static int clk_pm_runtime_get_and_consumers(struct clk_core *core)
+{
+	int ret;
+
+	if (!core || !core->rpm_enabled)
+		return 0;
+
+	ret = pm_runtime_resume_and_get(core->dev);
+	if (!ret)
+		pm_runtime_get_consumers(core->dev);
+
+	return ret;
+}
+
 static int clk_pm_runtime_get_if_active(struct clk_core *core)
 {
 	int ret;
@@ -141,6 +155,16 @@  static void clk_pm_runtime_put(struct clk_core *core)
 	pm_runtime_put_sync(core->dev);
 }
 
+static void clk_pm_runtime_put_and_consumers(struct clk_core *core)
+{
+	if (!core || !core->rpm_enabled)
+		return;
+
+	pm_runtime_put_consumers(core->dev);
+
+	pm_runtime_put_sync(core->dev);
+}
+
 /**
  * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
  *
@@ -2010,10 +2034,15 @@  unsigned long clk_get_rate(struct clk *clk)
 	if (!clk)
 		return 0;
 
+	if (clk_pm_runtime_get_and_consumers(clk->core))
+		return 0;
+
 	clk_prepare_lock();
 	rate = clk_core_get_rate_recalc(clk->core);
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(clk->core);
+
 	return rate;
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
@@ -2605,6 +2634,10 @@  int clk_set_rate(struct clk *clk, unsigned long rate)
 	if (!clk)
 		return 0;
 
+	ret = clk_pm_runtime_get_and_consumers(clk->core);
+	if (ret)
+		return ret;
+
 	/* prevent racing with updates to the clock topology */
 	clk_prepare_lock();
 
@@ -2618,6 +2651,8 @@  int clk_set_rate(struct clk *clk, unsigned long rate)
 
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(clk->core);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_rate);
@@ -2648,6 +2683,10 @@  int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
 	if (!clk)
 		return 0;
 
+	ret = clk_pm_runtime_get_and_consumers(clk->core);
+	if (ret)
+		return ret;
+
 	/* prevent racing with updates to the clock topology */
 	clk_prepare_lock();
 
@@ -2665,6 +2704,8 @@  int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
 
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(clk->core);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
@@ -2755,12 +2796,18 @@  int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
 	if (!clk)
 		return 0;
 
+	ret = clk_pm_runtime_get_and_consumers(clk->core);
+	if (ret)
+		return ret;
+
 	clk_prepare_lock();
 
 	ret = clk_set_rate_range_nolock(clk, min, max);
 
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(clk->core);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_rate_range);
@@ -2964,6 +3011,10 @@  int clk_set_parent(struct clk *clk, struct clk *parent)
 	if (!clk)
 		return 0;
 
+	ret = clk_pm_runtime_get_and_consumers(clk->core);
+	if (ret)
+		return ret;
+
 	clk_prepare_lock();
 
 	if (clk->exclusive_count)
@@ -2977,6 +3028,8 @@  int clk_set_parent(struct clk *clk, struct clk *parent)
 
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(clk->core);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_parent);
@@ -3459,10 +3512,16 @@  static int clk_rate_set(void *data, u64 val)
 	struct clk_core *core = data;
 	int ret;
 
+	ret = clk_pm_runtime_get_and_consumers(core);
+	if (ret)
+		return ret;
+
 	clk_prepare_lock();
 	ret = clk_core_set_rate_nolock(core, val);
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(core);
+
 	return ret;
 }
 
@@ -3518,11 +3577,18 @@  DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
 static int clk_rate_get(void *data, u64 *val)
 {
 	struct clk_core *core = data;
+	int ret;
+
+	ret = clk_pm_runtime_get_and_consumers(core);
+	if (ret)
+		return ret;
 
 	clk_prepare_lock();
 	*val = clk_core_get_rate_recalc(core);
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(core);
+
 	return 0;
 }
 
@@ -3659,12 +3725,18 @@  static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
 	if (!parent)
 		return -ENOENT;
 
+	err = clk_pm_runtime_get_and_consumers(parent);
+	if (err)
+		return err;
+
 	clk_prepare_lock();
 	err = clk_core_set_parent_nolock(core, parent);
 	clk_prepare_unlock();
+	clk_pm_runtime_put_and_consumers(parent);
+
 	if (err)
 		return err;
 
 	return count;
 }
 
@@ -4762,6 +4834,9 @@  void __clk_put(struct clk *clk)
 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
 		return;
 
+	if (clk_pm_runtime_get_and_consumers(clk->core))
+		return;
+
 	clk_prepare_lock();
 
 	/*
@@ -4784,6 +4859,8 @@  void __clk_put(struct clk *clk)
 
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_and_consumers(clk->core);
+
 	owner = clk->core->owner;
 	kref_put(&clk->core->ref, __clk_release);
 	module_put(owner);