
[v3] clk: qcom: clk-rcg2: Add support for duty-cycle for RCG

Message ID 1619334502-9880-2-git-send-email-tdas@codeaurora.org (mailing list archive)
State Accepted, archived
Series [v3] clk: qcom: clk-rcg2: Add support for duty-cycle for RCG

Commit Message

Taniya Das April 25, 2021, 7:08 a.m. UTC
The root clock generators with an MND divider have the capability to
change their duty cycle by updating the 'D' register value. Add clock
ops that check all the boundary conditions and allow a consumer to set
the desired duty cycle.
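
For example, a consumer could then request a duty cycle through the
common clock framework API (a minimal sketch; the device handle and
clock name are hypothetical):

	/* Hypothetical consumer: request a 25 % duty cycle on a GP clock */
	struct clk *clk = devm_clk_get(dev, "gp1");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_set_duty_cycle(clk, 1, 4);	/* num = 1, den = 4 */
	if (ret)
		dev_err(dev, "failed to set duty cycle: %d\n", ret);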

Signed-off-by: Taniya Das <tdas@codeaurora.org>
---
 drivers/clk/qcom/clk-rcg2.c | 81 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)

--
Qualcomm INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member
of the Code Aurora Forum, hosted by the Linux Foundation.

Comments

Stephen Boyd May 26, 2021, 3:36 a.m. UTC | #1
Quoting Taniya Das (2021-04-25 00:08:22)
> The root clock generators with an MND divider have the capability to
> change their duty cycle by updating the 'D' register value. Add clock
> ops that check all the boundary conditions and allow a consumer to set
> the desired duty cycle.
> 
> Signed-off-by: Taniya Das <tdas@codeaurora.org>
> ---

Applied to clk-next with '_val' removed everywhere as it made it super
hard to read. I also don't like the (d / 2) stuff but I can live with
it.

---8<---

diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index aa03e315d891..e1b1b426fae4 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -360,7 +360,7 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
 static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-	u32 notn_m_val, n_val, m_val, d_val, not2d_val, mask;
+	u32 notn_m, n, m, d, not2d, mask;
 
 	if (!rcg->mnd_width) {
 		/* 50 % duty-cycle for Non-MND RCGs */
@@ -369,11 +369,11 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 		return 0;
 	}
 
-	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d_val);
-	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m_val);
-	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m_val);
+	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
+	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
 
-	if (!not2d_val && !m_val && !notn_m_val) {
+	if (!not2d && !m && !notn_m) {
 		/* 50 % duty-cycle always */
 		duty->num = 1;
 		duty->den = 2;
@@ -382,13 +382,13 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 
 	mask = BIT(rcg->mnd_width) - 1;
 
-	d_val = ~(not2d_val) & mask;
-	d_val = DIV_ROUND_CLOSEST(d_val, 2);
+	d = ~(not2d) & mask;
+	d = DIV_ROUND_CLOSEST(d, 2);
 
-	n_val = (~(notn_m_val) + m_val) & mask;
+	n = (~(notn_m) + m) & mask;
 
-	duty->num = d_val;
-	duty->den = n_val;
+	duty->num = d;
+	duty->den = n;
 
 	return 0;
 }
@@ -396,7 +396,7 @@ static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-	u32 notn_m_val, n_val, m_val, d_val, not2d_val, mask, duty_per;
+	u32 notn_m, n, m, d, not2d, mask, duty_per;
 	int ret;
 
 	/* Duty-cycle cannot be modified for non-MND RCGs */
@@ -405,29 +405,29 @@ static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
 
 	mask = BIT(rcg->mnd_width) - 1;
 
-	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m_val);
-	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m_val);
+	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
+	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
 
-	n_val = (~(notn_m_val) + m_val) & mask;
+	n = (~(notn_m) + m) & mask;
 
 	duty_per = (duty->num * 100) / duty->den;
 
 	/* Calculate 2d value */
-	d_val = DIV_ROUND_CLOSEST(n_val * duty_per * 2, 100);
+	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
 
 	 /* Check bit widths of 2d. If D is too big reduce duty cycle. */
-	if (d_val > mask)
-		d_val = mask;
+	if (d > mask)
+		d = mask;
 
-	if ((d_val / 2) > (n_val - m_val))
-		d_val = (n_val - m_val) * 2;
-	else if ((d_val / 2) < (m_val / 2))
-		d_val = m_val;
+	if ((d / 2) > (n - m))
+		d = (n - m) * 2;
+	else if ((d / 2) < (m / 2))
+		d = m;
 
-	not2d_val = ~d_val & mask;
+	not2d = ~d & mask;
 
 	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
-				 not2d_val);
+				 not2d);
 	if (ret)
 		return ret;
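
To make the '(d / 2)' arithmetic concrete, here is the hunk above worked
through with illustrative values (a sketch only, assuming an 8-bit MND
field with M = 1, N = 4 and a 75 % request):

	mask = BIT(8) - 1;			/* 0xff */
	d = DIV_ROUND_CLOSEST(4 * 75 * 2, 100);	/* 2D = 6 */
	/* d / 2 = 3 is neither above n - m = 3 nor below m / 2 = 0: no clamp */
	not2d = ~6 & 0xff;			/* 0xf9 is written to the D register */
	/* get_duty_cycle() then reads back D = 3, N = 4, i.e. 3/4 = 75 % */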

Patch

diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 05ff3b0..aa03e31 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -357,6 +357,83 @@  static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
 }

+static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	u32 notn_m_val, n_val, m_val, d_val, not2d_val, mask;
+
+	if (!rcg->mnd_width) {
+		/* 50 % duty-cycle for Non-MND RCGs */
+		duty->num = 1;
+		duty->den = 2;
+		return 0;
+	}
+
+	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d_val);
+	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m_val);
+	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m_val);
+
+	if (!not2d_val && !m_val && !notn_m_val) {
+		/* 50 % duty-cycle always */
+		duty->num = 1;
+		duty->den = 2;
+		return 0;
+	}
+
+	mask = BIT(rcg->mnd_width) - 1;
+
+	d_val = ~(not2d_val) & mask;
+	d_val = DIV_ROUND_CLOSEST(d_val, 2);
+
+	n_val = (~(notn_m_val) + m_val) & mask;
+
+	duty->num = d_val;
+	duty->den = n_val;
+
+	return 0;
+}
+
+static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	u32 notn_m_val, n_val, m_val, d_val, not2d_val, mask, duty_per;
+	int ret;
+
+	/* Duty-cycle cannot be modified for non-MND RCGs */
+	if (!rcg->mnd_width)
+		return -EINVAL;
+
+	mask = BIT(rcg->mnd_width) - 1;
+
+	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m_val);
+	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m_val);
+
+	n_val = (~(notn_m_val) + m_val) & mask;
+
+	duty_per = (duty->num * 100) / duty->den;
+
+	/* Calculate 2d value */
+	d_val = DIV_ROUND_CLOSEST(n_val * duty_per * 2, 100);
+
+	 /* Check bit widths of 2d. If D is too big reduce duty cycle. */
+	if (d_val > mask)
+		d_val = mask;
+
+	if ((d_val / 2) > (n_val - m_val))
+		d_val = (n_val - m_val) * 2;
+	else if ((d_val / 2) < (m_val / 2))
+		d_val = m_val;
+
+	not2d_val = ~d_val & mask;
+
+	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
+				 not2d_val);
+	if (ret)
+		return ret;
+
+	return update_config(rcg);
+}
+
 const struct clk_ops clk_rcg2_ops = {
 	.is_enabled = clk_rcg2_is_enabled,
 	.get_parent = clk_rcg2_get_parent,
@@ -365,6 +442,8 @@  const struct clk_ops clk_rcg2_ops = {
 	.determine_rate = clk_rcg2_determine_rate,
 	.set_rate = clk_rcg2_set_rate,
 	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+	.get_duty_cycle = clk_rcg2_get_duty_cycle,
+	.set_duty_cycle = clk_rcg2_set_duty_cycle,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_ops);

@@ -376,6 +455,8 @@  const struct clk_ops clk_rcg2_floor_ops = {
 	.determine_rate = clk_rcg2_determine_floor_rate,
 	.set_rate = clk_rcg2_set_floor_rate,
 	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+	.get_duty_cycle = clk_rcg2_get_duty_cycle,
+	.set_duty_cycle = clk_rcg2_set_duty_cycle,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
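
One consequence of the clamping in the set path is that a request the
divider cannot express is silently reduced rather than rejected
(illustrative values again, assuming mnd_width = 8, M = 1, N = 4 and a
99 % request):

	duty_per = 99;
	d = DIV_ROUND_CLOSEST(4 * 99 * 2, 100);	/* 2D = 8 */
	/* d / 2 = 4 exceeds n - m = 3, so d is clamped to (n - m) * 2 = 6 */
	/* the effective duty cycle is therefore 3/4 = 75 %, not 99 % */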