
[v2,1/5] clk: scmi: Allocate CLK operations dynamically

Message ID 20240325210025.1448717-2-cristian.marussi@arm.com (mailing list archive)
State Changes Requested, archived
Series Rework SCMI Clock driver clk_ops setup procedure

Commit Message

Cristian Marussi March 25, 2024, 9 p.m. UTC
SCMI clock descriptors expose an increasing number of properties which, in
turn, lead to a varying set of supported CLK operations being associated
with each clock.

Providing statically pre-defined CLK operations structs for all the
possible combinations of allowed clock features is becoming cumbersome and
error-prone.

Allocate the per-clock operations descriptors dynamically and populate
them with only the set of operations strictly needed by the advertised
clock properties: one descriptor is created for each distinct combination
of clock operations, keeping the number of clk_ops structures to the
minimum actually required.

CC: Michael Turquette <mturquette@baylibre.com>
CC: Stephen Boyd <sboyd@kernel.org>
CC: linux-clk@vger.kernel.org
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
---
 drivers/clk/clk-scmi.c | 163 ++++++++++++++++++++++++++++-------------
 1 file changed, 114 insertions(+), 49 deletions(-)

Comments

Florian Fainelli March 30, 2024, 3:42 a.m. UTC | #1
On 25/03/2024 14:00, Cristian Marussi wrote:
> SCMI clock descriptors expose an increasing number of properties which, in
> turn, lead to a varying set of supported CLK operations being associated
> with each clock.
> 
> Providing statically pre-defined CLK operations structs for all the
> possible combinations of allowed clock features is becoming cumbersome and
> error-prone.
> 
> Allocate the per-clock operations descriptors dynamically and populate
> them with only the set of operations strictly needed by the advertised
> clock properties: one descriptor is created for each distinct combination
> of clock operations, keeping the number of clk_ops structures to the
> minimum actually required.
> 
> CC: Michael Turquette <mturquette@baylibre.com>
> CC: Stephen Boyd <sboyd@kernel.org>
> CC: linux-clk@vger.kernel.org
> Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>

Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
Stephen Boyd April 8, 2024, 4:38 a.m. UTC | #2
Quoting Cristian Marussi (2024-03-25 14:00:21)
> diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
> index 8cbe24789c24..d5d369b052bd 100644
> --- a/drivers/clk/clk-scmi.c
> +++ b/drivers/clk/clk-scmi.c
> @@ -16,6 +16,14 @@
>  #define NOT_ATOMIC     false
>  #define ATOMIC         true
>  
> +enum scmi_clk_feats {
> +       SCMI_CLK_ATOMIC_SUPPORTED,
> +       SCMI_CLK_MAX_FEATS
> +};
> +
> +#define SCMI_MAX_CLK_OPS       (1 << SCMI_CLK_MAX_FEATS)
> +
> +static const struct clk_ops *clk_ops_db[SCMI_MAX_CLK_OPS];

Can it be 'scmi_clk_ops_db' for some name spacing?

>  static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
>  
>  struct scmi_clk {
> @@ -230,6 +202,106 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
>         return ret;
>  }
>  
> +/**
> + * scmi_clk_ops_alloc() - Alloc and configure clock operations
> + * @dev: A device reference for devres
> + * @feats_key: A bitmap representing the desired clk_ops capabilities.

Drop the period please because it's not consistent with the previous
argument descriptor.

> + *
> + * Allocate and configure a proper set of clock operations depending on the
> + * specifically required SCMI clock features.
> + *
> + * Return: A pointer to the allocated and configured clk_ops on Success,

Lowercase 'Success'.

> +
> +/**
> + * scmi_clk_ops_select() - Select a proper set of clock operations
> + * @sclk: A reference to an SCMI clock descriptor
> + * @atomic_capable: A flag to indicate if atomic mode is supported by the
> + *                 transport
> + * @atomic_threshold: Platform atomic threshold value

Is this in nanoseconds, microseconds, or ??? Maybe a better description is
"clk_ops are atomic when clk enable_latency is less than X [time unit]" 

> + *
> + * After having built a bitmap descriptor to represent the set of features
> + * needed by this SCMI clock, at first use it to lookup into the set of
> + * previously allocated clk_ops to check if a suitable combination of clock
> + * operations was already created; when no match is found allocate a brand new
> + * set of clk_ops satisfying the required combination of features and save it
> + * for future references.
> + *
> + * In this way only one set of clk_ops is ever created for each different
> + * combination that is effectively needed.
> + *
> + * Return: A pointer to the allocated and configured clk_ops on Success, or

Lowercase 'Success'.

> + *        NULL otherwise.
> + */
> +static const struct clk_ops *
> +scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
> +                   unsigned int atomic_threshold)
> +{
> +       const struct scmi_clock_info *ci = sclk->info;
> +       unsigned int feats_key = 0;
> +       const struct clk_ops *ops;
> +
> +       /*
> +        * Note that when transport is atomic but SCMI protocol did not
> +        * specify (or support) an enable_latency associated with a
> +        * clock, we default to use atomic operations mode.
> +        */
> +       if (atomic_capable && ci->enable_latency <= atomic_threshold)
> +               feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
> +

Can we have a static_assert() here that makes sure 'feats_key' isn't
larger than the size of clk_ops_db?

	static_assert(ARRAY_SIZE(clk_ops_db) >= feats_key);

> +       /* Lookup previously allocated ops */
> +       ops = clk_ops_db[feats_key];
> +       if (!ops) {
> +               ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
> +               if (!ops)
> +                       return NULL;

This could be less nested if the first lookup is put in
scmi_clk_ops_alloc() and the store below is folded in. Or an early
return if found.

	ops = clk_ops_db[feats_key];
	if (ops)
		return ops;

	/* Didn't find one */
	ops = scmi_clk_ops_alloc(...)
	if (!ops)
		return NULL;

	clk_ops_db[feats_key] = ops;
	return ops;
		
> +
> +               /* Store new ops combinations */
> +               clk_ops_db[feats_key] = ops;
> +       }
> +
> +       return ops;
> +}
> +
>  static int scmi_clocks_probe(struct scmi_device *sdev)
>  {
>         int idx, count, err;
> @@ -285,16 +357,10 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
>                 sclk->ph = ph;
>                 sclk->dev = dev;
>  
> -               /*
> -                * Note that when transport is atomic but SCMI protocol did not
> -                * specify (or support) an enable_latency associated with a
> -                * clock, we default to use atomic operations mode.
> -                */
> -               if (is_atomic &&
> -                   sclk->info->enable_latency <= atomic_threshold)
> -                       scmi_ops = &scmi_atomic_clk_ops;
> -               else
> -                       scmi_ops = &scmi_clk_ops;
> +               scmi_ops = scmi_clk_ops_select(sclk, is_atomic,

'is_atomic' should probably be 'transport_is_atomic' so this reads
easier.

> +                                              atomic_threshold);
> +               if (!scmi_ops)
> +                       return -ENOMEM;
>  
>                 /* Initialize clock parent data. */
>                 if (sclk->info->num_parents > 0) {
Cristian Marussi April 8, 2024, 6:23 p.m. UTC | #3
On Sun, Apr 07, 2024 at 09:38:46PM -0700, Stephen Boyd wrote:
> Quoting Cristian Marussi (2024-03-25 14:00:21)
> > diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
> > index 8cbe24789c24..d5d369b052bd 100644
> > --- a/drivers/clk/clk-scmi.c
> > +++ b/drivers/clk/clk-scmi.c
> > @@ -16,6 +16,14 @@
> >  #define NOT_ATOMIC     false
> >  #define ATOMIC         true
> >  

Hi, 

thanks for the review.

> > +enum scmi_clk_feats {
> > +       SCMI_CLK_ATOMIC_SUPPORTED,
> > +       SCMI_CLK_MAX_FEATS
> > +};
> > +
> > +#define SCMI_MAX_CLK_OPS       (1 << SCMI_CLK_MAX_FEATS)
> > +
> > +static const struct clk_ops *clk_ops_db[SCMI_MAX_CLK_OPS];
> 
> Can it be 'scmi_clk_ops_db' for some name spacing?
> 

Yes.

> >  static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
> >  
> >  struct scmi_clk {
> > @@ -230,6 +202,106 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
> >         return ret;
> >  }
> >  
> > +/**
> > + * scmi_clk_ops_alloc() - Alloc and configure clock operations
> > + * @dev: A device reference for devres
> > + * @feats_key: A bitmap representing the desired clk_ops capabilities.
> 
> Drop the period please because it's not consistent with the previous
> argument descriptor.
>

Ok.
 
> > + *
> > + * Allocate and configure a proper set of clock operations depending on the
> > + * specifically required SCMI clock features.
> > + *
> > + * Return: A pointer to the allocated and configured clk_ops on Success,
> 
> Lowercase 'Success'.
>

Ok.
 
> > +
> > +/**
> > + * scmi_clk_ops_select() - Select a proper set of clock operations
> > + * @sclk: A reference to an SCMI clock descriptor
> > + * @atomic_capable: A flag to indicate if atomic mode is supported by the
> > + *                 transport
> > + * @atomic_threshold: Platform atomic threshold value
> 
> Is this in nanoseconds, microseconds, or ??? Maybe a better description is
> "clk_ops are atomic when clk enable_latency is less than X [time unit]" 
>

It is microseconds, I will comment better.
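Something along these lines, perhaps (exact wording still to be settled in
the next version):

	 * @atomic_threshold: Platform atomic threshold value in microseconds:
	 *		      clk_ops are atomic when the clock enable_latency
	 *		      is less than or equal to this value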

> > + *
> > + * After having built a bitmap descriptor to represent the set of features
> > + * needed by this SCMI clock, at first use it to lookup into the set of
> > + * previously allocated clk_ops to check if a suitable combination of clock
> > + * operations was already created; when no match is found allocate a brand new
> > + * set of clk_ops satisfying the required combination of features and save it
> > + * for future references.
> > + *
> > + * In this way only one set of clk_ops is ever created for each different
> > + * combination that is effectively needed.
> > + *
> > + * Return: A pointer to the allocated and configured clk_ops on Success, or
> 
> Lowercase 'Success'.
>

Ok.
 
> > + *        NULL otherwise.
> > + */
> > +static const struct clk_ops *
> > +scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
> > +                   unsigned int atomic_threshold)
> > +{
> > +       const struct scmi_clock_info *ci = sclk->info;
> > +       unsigned int feats_key = 0;
> > +       const struct clk_ops *ops;
> > +
> > +       /*
> > +        * Note that when transport is atomic but SCMI protocol did not
> > +        * specify (or support) an enable_latency associated with a
> > +        * clock, we default to use atomic operations mode.
> > +        */
> > +       if (atomic_capable && ci->enable_latency <= atomic_threshold)
> > +               feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
> > +
> 
> Can we have a static_assert() here that makes sure 'feats_key' isn't
> larger than the size of clk_ops_db?
> 
> 	static_assert(ARRAY_SIZE(clk_ops_db) >= feats_key);
> 

Ok.

> > +       /* Lookup previously allocated ops */
> > +       ops = clk_ops_db[feats_key];
> > +       if (!ops) {
> > +               ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
> > +               if (!ops)
> > +                       return NULL;
> 
> This could be less nested if the first lookup is put in
> scmi_clk_ops_alloc() and the store below is folded in. Or an early
> return if found.
> 
> 	ops = clk_ops_db[feats_key];
> 	if (ops)
> 		return ops;
> 
> 	/* Didn't find one */
> 	ops = scmi_clk_ops_alloc(...)
> 	if (!ops)
> 		return NULL;
> 
> 	clk_ops_db[feats_key] = ops;
> 	return ops;
> 

ok.
		
> > +
> > +               /* Store new ops combinations */
> > +               clk_ops_db[feats_key] = ops;
> > +       }
> > +
> > +       return ops;
> > +}
> > +
> >  static int scmi_clocks_probe(struct scmi_device *sdev)
> >  {
> >         int idx, count, err;
> > @@ -285,16 +357,10 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
> >                 sclk->ph = ph;
> >                 sclk->dev = dev;
> >  
> > -               /*
> > -                * Note that when transport is atomic but SCMI protocol did not
> > -                * specify (or support) an enable_latency associated with a
> > -                * clock, we default to use atomic operations mode.
> > -                */
> > -               if (is_atomic &&
> > -                   sclk->info->enable_latency <= atomic_threshold)
> > -                       scmi_ops = &scmi_atomic_clk_ops;
> > -               else
> > -                       scmi_ops = &scmi_clk_ops;
> > +               scmi_ops = scmi_clk_ops_select(sclk, is_atomic,
> 
> 'is_atomic' should probably be 'transport_is_atomic' so this reads
> easier.
> 

Ok.

Thanks,
Cristian

Patch

diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 8cbe24789c24..d5d369b052bd 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -2,7 +2,7 @@ 
 /*
  * System Control and Power Interface (SCMI) Protocol based clock driver
  *
- * Copyright (C) 2018-2022 ARM Ltd.
+ * Copyright (C) 2018-2024 ARM Ltd.
  */
 
 #include <linux/clk-provider.h>
@@ -16,6 +16,14 @@ 
 #define NOT_ATOMIC	false
 #define ATOMIC		true
 
+enum scmi_clk_feats {
+	SCMI_CLK_ATOMIC_SUPPORTED,
+	SCMI_CLK_MAX_FEATS
+};
+
+#define SCMI_MAX_CLK_OPS	(1 << SCMI_CLK_MAX_FEATS)
+
+static const struct clk_ops *clk_ops_db[SCMI_MAX_CLK_OPS];
 static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
 
 struct scmi_clk {
@@ -158,42 +166,6 @@  static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
 	return !!enabled;
 }
 
-/*
- * We can provide enable/disable/is_enabled atomic callbacks only if the
- * underlying SCMI transport for an SCMI instance is configured to handle
- * SCMI commands in an atomic manner.
- *
- * When no SCMI atomic transport support is available we instead provide only
- * the prepare/unprepare API, as allowed by the clock framework when atomic
- * calls are not available.
- *
- * Two distinct sets of clk_ops are provided since we could have multiple SCMI
- * instances with different underlying transport quality, so they cannot be
- * shared.
- */
-static const struct clk_ops scmi_clk_ops = {
-	.recalc_rate = scmi_clk_recalc_rate,
-	.round_rate = scmi_clk_round_rate,
-	.set_rate = scmi_clk_set_rate,
-	.prepare = scmi_clk_enable,
-	.unprepare = scmi_clk_disable,
-	.set_parent = scmi_clk_set_parent,
-	.get_parent = scmi_clk_get_parent,
-	.determine_rate = scmi_clk_determine_rate,
-};
-
-static const struct clk_ops scmi_atomic_clk_ops = {
-	.recalc_rate = scmi_clk_recalc_rate,
-	.round_rate = scmi_clk_round_rate,
-	.set_rate = scmi_clk_set_rate,
-	.enable = scmi_clk_atomic_enable,
-	.disable = scmi_clk_atomic_disable,
-	.is_enabled = scmi_clk_atomic_is_enabled,
-	.set_parent = scmi_clk_set_parent,
-	.get_parent = scmi_clk_get_parent,
-	.determine_rate = scmi_clk_determine_rate,
-};
-
 static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
 			     const struct clk_ops *scmi_ops)
 {
@@ -230,6 +202,106 @@  static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
 	return ret;
 }
 
+/**
+ * scmi_clk_ops_alloc() - Alloc and configure clock operations
+ * @dev: A device reference for devres
+ * @feats_key: A bitmap representing the desired clk_ops capabilities.
+ *
+ * Allocate and configure a proper set of clock operations depending on the
+ * specifically required SCMI clock features.
+ *
+ * Return: A pointer to the allocated and configured clk_ops on Success,
+ *	   or NULL on allocation failure.
+ */
+static const struct clk_ops *
+scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
+{
+	struct clk_ops *ops;
+
+	ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return NULL;
+	/*
+	 * We can provide enable/disable/is_enabled atomic callbacks only if the
+	 * underlying SCMI transport for an SCMI instance is configured to
+	 * handle SCMI commands in an atomic manner.
+	 *
+	 * When no SCMI atomic transport support is available we instead provide
+	 * only the prepare/unprepare API, as allowed by the clock framework
+	 * when atomic calls are not available.
+	 */
+	if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
+		ops->enable = scmi_clk_atomic_enable;
+		ops->disable = scmi_clk_atomic_disable;
+		ops->is_enabled = scmi_clk_atomic_is_enabled;
+	} else {
+		ops->prepare = scmi_clk_enable;
+		ops->unprepare = scmi_clk_disable;
+	}
+
+	/* Rate ops */
+	ops->recalc_rate = scmi_clk_recalc_rate;
+	ops->round_rate = scmi_clk_round_rate;
+	ops->determine_rate = scmi_clk_determine_rate;
+	ops->set_rate = scmi_clk_set_rate;
+
+	/* Parent ops */
+	ops->get_parent = scmi_clk_get_parent;
+	ops->set_parent = scmi_clk_set_parent;
+
+	return ops;
+}
+
+/**
+ * scmi_clk_ops_select() - Select a proper set of clock operations
+ * @sclk: A reference to an SCMI clock descriptor
+ * @atomic_capable: A flag to indicate if atomic mode is supported by the
+ *		    transport
+ * @atomic_threshold: Platform atomic threshold value
+ *
+ * After having built a bitmap descriptor to represent the set of features
+ * needed by this SCMI clock, at first use it to lookup into the set of
+ * previously allocated clk_ops to check if a suitable combination of clock
+ * operations was already created; when no match is found allocate a brand new
+ * set of clk_ops satisfying the required combination of features and save it
+ * for future references.
+ *
+ * In this way only one set of clk_ops is ever created for each different
+ * combination that is effectively needed.
+ *
+ * Return: A pointer to the allocated and configured clk_ops on Success, or
+ *	   NULL otherwise.
+ */
+static const struct clk_ops *
+scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
+		    unsigned int atomic_threshold)
+{
+	const struct scmi_clock_info *ci = sclk->info;
+	unsigned int feats_key = 0;
+	const struct clk_ops *ops;
+
+	/*
+	 * Note that when transport is atomic but SCMI protocol did not
+	 * specify (or support) an enable_latency associated with a
+	 * clock, we default to use atomic operations mode.
+	 */
+	if (atomic_capable && ci->enable_latency <= atomic_threshold)
+		feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
+
+	/* Lookup previously allocated ops */
+	ops = clk_ops_db[feats_key];
+	if (!ops) {
+		ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
+		if (!ops)
+			return NULL;
+
+		/* Store new ops combinations */
+		clk_ops_db[feats_key] = ops;
+	}
+
+	return ops;
+}
+
 static int scmi_clocks_probe(struct scmi_device *sdev)
 {
 	int idx, count, err;
@@ -285,16 +357,10 @@  static int scmi_clocks_probe(struct scmi_device *sdev)
 		sclk->ph = ph;
 		sclk->dev = dev;
 
-		/*
-		 * Note that when transport is atomic but SCMI protocol did not
-		 * specify (or support) an enable_latency associated with a
-		 * clock, we default to use atomic operations mode.
-		 */
-		if (is_atomic &&
-		    sclk->info->enable_latency <= atomic_threshold)
-			scmi_ops = &scmi_atomic_clk_ops;
-		else
-			scmi_ops = &scmi_clk_ops;
+		scmi_ops = scmi_clk_ops_select(sclk, is_atomic,
+					       atomic_threshold);
+		if (!scmi_ops)
+			return -ENOMEM;
 
 		/* Initialize clock parent data. */
 		if (sclk->info->num_parents > 0) {
@@ -318,8 +384,7 @@  static int scmi_clocks_probe(struct scmi_device *sdev)
 		} else {
 			dev_dbg(dev, "Registered clock:%s%s\n",
 				sclk->info->name,
-				scmi_ops == &scmi_atomic_clk_ops ?
-				" (atomic ops)" : "");
+				scmi_ops->enable ? " (atomic ops)" : "");
 			hws[idx] = &sclk->hw;
 		}
 	}
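
As a side note on the design: the lookup-or-allocate scheme above scales by
construction when later patches in the series add more feature bits, since the
cache is indexed by the full feature bitmap. Below is a minimal standalone
sketch of that pattern, compiled against plain libc rather than the kernel
devres API; all names are illustrative and not part of the patch:

	/*
	 * Standalone illustration (not part of the patch): a feature bitmap
	 * indexes a small cache of operation descriptors, so each distinct
	 * combination of features is materialized exactly once.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	enum demo_clk_feats {
		DEMO_CLK_ATOMIC_SUPPORTED,
		DEMO_CLK_MAX_FEATS
	};

	#define DEMO_MAX_CLK_OPS	(1 << DEMO_CLK_MAX_FEATS)

	struct demo_clk_ops {
		bool atomic;		/* stand-in for the real callback set */
	};

	static struct demo_clk_ops *demo_ops_db[DEMO_MAX_CLK_OPS];

	static struct demo_clk_ops *demo_ops_alloc(unsigned int feats_key)
	{
		struct demo_clk_ops *ops = calloc(1, sizeof(*ops));

		if (!ops)
			return NULL;

		ops->atomic = feats_key & (1U << DEMO_CLK_ATOMIC_SUPPORTED);
		return ops;
	}

	static struct demo_clk_ops *demo_ops_select(unsigned int feats_key)
	{
		struct demo_clk_ops *ops = demo_ops_db[feats_key];

		/* Reuse a previously built descriptor for this combination */
		if (ops)
			return ops;

		ops = demo_ops_alloc(feats_key);
		if (!ops)
			return NULL;

		demo_ops_db[feats_key] = ops;
		return ops;
	}

	int main(void)
	{
		/* Same feature key yields the same cached descriptor... */
		printf("%d\n", demo_ops_select(1) == demo_ops_select(1));
		/* ...while different keys yield distinct descriptors */
		printf("%d\n", demo_ops_select(0) != demo_ops_select(1));
		return 0;
	}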