[net-next,v2,2/3] net: ti: icss-iep: Enable compare events

Message ID 20240604-iep-v2-2-ea8e1c0a5686@siemens.com
State Superseded
Delegated to: Netdev Maintainers
Series Enable PTP timestamping/PPS for AM65x SR1.0 devices

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 901 this patch: 901
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 11 of 11 maintainers
netdev/build_clang success Errors and warnings before: 905 this patch: 905
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 905 this patch: 905
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 116 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-06-04--18-00 (tests: 1045)

Commit Message

Diogo Ivo June 4, 2024, 1:15 p.m. UTC
The IEP module supports compare events: a value is written to a hardware
register, and when the IEP counter reaches that value an interrupt is
generated. Add handling for this interrupt in order to support PPS
events.

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Diogo Ivo <diogo.ivo@siemens.com>
---
 drivers/net/ethernet/ti/icssg/icss_iep.c | 74 ++++++++++++++++++++++++++++++++
 1 file changed, 74 insertions(+)
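
As an aside, because the clock registers with ptp_info.pps = 1, the PTP core
exposes the events raised here as a standard PPS source. A minimal,
hypothetical consumer using the RFC 2783 API is sketched below; the /dev/ptp0
and /dev/pps0 paths, the five-sample loop, and building against pps-tools'
<sys/timepps.h> are assumptions for illustration, not part of the patch.

/*
 * Hypothetical PPS consumer, for illustration only. Assumes the IEP PHC
 * is /dev/ptp0 and its PPS source is /dev/pps0; PTP_ENABLE_PPS requires
 * CAP_SYS_TIME, so run as root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/timepps.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct timespec timeout = { .tv_sec = 3 };
	pps_handle_t handle;
	pps_info_t info;
	int enable = 1, i;
	int ptp_fd = open("/dev/ptp0", O_RDWR);
	int pps_fd = open("/dev/pps0", O_RDONLY);

	if (ptp_fd < 0 || pps_fd < 0)
		return 1;

	/* Reaches icss_iep_pps_enable() via the PTP_CLK_REQ_PPS request. */
	if (ioctl(ptp_fd, PTP_ENABLE_PPS, enable))
		return 1;

	if (time_pps_create(pps_fd, &handle))
		return 1;

	/* Each PTP_CLOCK_PPSUSR event surfaces as an assert timestamp. */
	for (i = 0; i < 5; i++) {
		if (time_pps_fetch(handle, PPS_TSFMT_TSPEC, &info, &timeout))
			break;
		printf("pps %lld.%09ld seq %lu\n",
		       (long long)info.assert_timestamp.tv_sec,
		       info.assert_timestamp.tv_nsec,
		       (unsigned long)info.assert_sequence);
	}

	time_pps_destroy(handle);
	return 0;
}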

Comments

Wojciech Drewek June 4, 2024, 2:23 p.m. UTC | #1
On 04.06.2024 15:15, Diogo Ivo wrote:
> The IEP module supports compare events: a value is written to a hardware
> register, and when the IEP counter reaches that value an interrupt is
> generated. Add handling for this interrupt in order to support PPS
> events.
> 
> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
> Signed-off-by: Diogo Ivo <diogo.ivo@siemens.com>
> ---

Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>

>  drivers/net/ethernet/ti/icssg/icss_iep.c | 74 ++++++++++++++++++++++++++++++++
>  1 file changed, 74 insertions(+)
> 
> diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
> index 3025e9c18970..b076be9c527c 100644
> --- a/drivers/net/ethernet/ti/icssg/icss_iep.c
> +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
> @@ -17,6 +17,7 @@
>  #include <linux/timekeeping.h>
>  #include <linux/interrupt.h>
>  #include <linux/of_irq.h>
> +#include <linux/workqueue.h>
>  
>  #include "icss_iep.h"
>  
> @@ -122,6 +123,7 @@ struct icss_iep {
>  	int cap_cmp_irq;
>  	u64 period;
>  	u32 latch_enable;
> +	struct work_struct work;
>  };
>  
>  /**
> @@ -571,6 +573,57 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
>  	return ret;
>  }
>  
> +static void icss_iep_cap_cmp_work(struct work_struct *work)
> +{
> +	struct icss_iep *iep = container_of(work, struct icss_iep, work);
> +	const u32 *reg_offs = iep->plat_data->reg_offs;
> +	struct ptp_clock_event pevent;
> +	unsigned int val;
> +	u64 ns, ns_next;
> +
> +	spin_lock(&iep->irq_lock);
> +
> +	ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
> +	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
> +		val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
> +		ns |= (u64)val << 32;
> +	}
> +	/* set next event */
> +	ns_next = ns + iep->period;
> +	writel(lower_32_bits(ns_next),
> +	       iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
> +	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
> +		writel(upper_32_bits(ns_next),
> +		       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
> +
> +	pevent.pps_times.ts_real = ns_to_timespec64(ns);
> +	pevent.type = PTP_CLOCK_PPSUSR;
> +	pevent.index = 0;
> +	ptp_clock_event(iep->ptp_clock, &pevent);
> +	dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);
> +
> +	spin_unlock(&iep->irq_lock);
> +}
> +
> +static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
> +{
> +	struct icss_iep *iep = (struct icss_iep *)dev_id;
> +	const u32 *reg_offs = iep->plat_data->reg_offs;
> +	unsigned int val;
> +
> +	val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
> +	/* The driver only enables CMP1 */
> +	if (val & BIT(1)) {
> +		/* Clear the event */
> +		writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
> +		if (iep->pps_enabled || iep->perout_enabled)
> +			schedule_work(&iep->work);
> +		return IRQ_HANDLED;
> +	}
> +
> +	return IRQ_NONE;
> +}
> +
>  static int icss_iep_pps_enable(struct icss_iep *iep, int on)
>  {
>  	struct ptp_clock_request rq;
> @@ -602,6 +655,8 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
>  		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
>  	} else {
>  		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
> +		if (iep->cap_cmp_irq)
> +			cancel_work_sync(&iep->work);
>  	}
>  
>  	if (!ret)
> @@ -777,6 +832,8 @@ int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
>  	if (iep->ops && iep->ops->perout_enable) {
>  		iep->ptp_info.n_per_out = 1;
>  		iep->ptp_info.pps = 1;
> +	} else if (iep->cap_cmp_irq) {
> +		iep->ptp_info.pps = 1;
>  	}
>  
>  	if (iep->ops && iep->ops->extts_enable)
> @@ -817,6 +874,7 @@ static int icss_iep_probe(struct platform_device *pdev)
>  	struct device *dev = &pdev->dev;
>  	struct icss_iep *iep;
>  	struct clk *iep_clk;
> +	int ret, irq;
>  
>  	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
>  	if (!iep)
> @@ -827,6 +885,22 @@ static int icss_iep_probe(struct platform_device *pdev)
>  	if (IS_ERR(iep->base))
>  		return -ENODEV;
>  
> +	irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
> +	if (irq == -EPROBE_DEFER)
> +		return irq;
> +
> +	if (irq > 0) {
> +		ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
> +				       IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
> +		if (ret) {
> +			dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
> +				 ret);
> +		} else {
> +			iep->cap_cmp_irq = irq;
> +			INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
> +		}
> +	}
> +
>  	iep_clk = devm_clk_get(dev, NULL);
>  	if (IS_ERR(iep_clk))
>  		return PTR_ERR(iep_clk);
>
Paolo Abeni June 6, 2024, 10:32 a.m. UTC | #2
On Tue, 2024-06-04 at 14:15 +0100, Diogo Ivo wrote:
> @@ -571,6 +573,57 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
>  	return ret;
>  }
>  
> +static void icss_iep_cap_cmp_work(struct work_struct *work)
> +{
> +	struct icss_iep *iep = container_of(work, struct icss_iep, work);
> +	const u32 *reg_offs = iep->plat_data->reg_offs;
> +	struct ptp_clock_event pevent;
> +	unsigned int val;
> +	u64 ns, ns_next;
> +
> +	spin_lock(&iep->irq_lock);

'irq_lock' is always acquired with the irqsave variant, and here we are
in process context. This discrepancy would at least deserve a comment;
likely the above lock type is not correct.

Cheers,

Paolo
Diogo Ivo June 6, 2024, 1:28 p.m. UTC | #3
Hi Paolo,

On 6/6/24 11:32 AM, Paolo Abeni wrote:
> On Tue, 2024-06-04 at 14:15 +0100, Diogo Ivo wrote:
>> @@ -571,6 +573,57 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
>>   	return ret;
>>   }
>>   
>> +static void icss_iep_cap_cmp_work(struct work_struct *work)
>> +{
>> +	struct icss_iep *iep = container_of(work, struct icss_iep, work);
>> +	const u32 *reg_offs = iep->plat_data->reg_offs;
>> +	struct ptp_clock_event pevent;
>> +	unsigned int val;
>> +	u64 ns, ns_next;
>> +
>> +	spin_lock(&iep->irq_lock);
> 
> 'irq_lock' is always acquired with the irqsave variant, and here we are
> in process context. This discrepancy would at least deserve a comment;
> likely the above lock type is not correct.

If my reasoning is correct, this variant is the right one here. The
register accesses in the IRQ handler and in icss_iep_cap_cmp_work() are
orthogonal, so there should be no need to guard against the IRQ handler
here. The same orthogonality holds in the other places where the
_irqsave() variant is used, so _irqsave() is overkill in those places.

From my understanding, this is a remnant of the SDK's version of the
driver, where all of the processing now done in icss_iep_cap_cmp_work()
was done directly in the IRQ handler, meaning that we had to guard
against some other thread calling icss_iep_ptp_enable() and accessing
for example ICSS_IEP_CMP1_REG0 concurrently. This can be seen in the
comment on line 114:

struct icss_iep {
  ...
	spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
  ...
};

For v3 I can add a comment with a condensed version of this argument in
icss_iep_cap_cmp_work().

With this said it should be possible to change this spinlock to a mutex as
well, since all the possibilities for concurrency happen outside of
interrupt context. I can add a patch to this series doing that if you
agree with my reasoning above and find it beneficial. For this some
comments from TI would also be good to have in case I missed something
or there is some other factor that I am not aware of.

Best regards,
Diogo
Paolo Abeni June 6, 2024, 1:49 p.m. UTC | #4
On Thu, 2024-06-06 at 14:28 +0100, Diogo Ivo wrote:
> On 6/6/24 11:32 AM, Paolo Abeni wrote:
> > On Tue, 2024-06-04 at 14:15 +0100, Diogo Ivo wrote:
> > > @@ -571,6 +573,57 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
> > >   	return ret;
> > >   }
> > >   
> > > +static void icss_iep_cap_cmp_work(struct work_struct *work)
> > > +{
> > > +	struct icss_iep *iep = container_of(work, struct icss_iep, work);
> > > +	const u32 *reg_offs = iep->plat_data->reg_offs;
> > > +	struct ptp_clock_event pevent;
> > > +	unsigned int val;
> > > +	u64 ns, ns_next;
> > > +
> > > +	spin_lock(&iep->irq_lock);
> > 
> > 'irq_lock' is always acquired with the irqsave variant, and here we are
> > in process context. This discrepancy would at least deserve a comment;
> > likely the above lock type is not correct.
> 
> If my reasoning is correct, this variant is the right one here. The
> register accesses in the IRQ handler and in icss_iep_cap_cmp_work() are
> orthogonal, so there should be no need to guard against the IRQ handler
> here. The same orthogonality holds in the other places where the
> _irqsave() variant is used, so _irqsave() is overkill in those places.
> 
> From my understanding, this is a remnant of the SDK's version of the
> driver, where all of the processing now done in icss_iep_cap_cmp_work()
> was done directly in the IRQ handler, meaning that we had to guard
> against some other thread calling icss_iep_ptp_enable() and accessing
> for example ICSS_IEP_CMP1_REG0 concurrently. This can be seen in the
> comment on line 114:
> 
> struct icss_iep {
>   ...
> 	spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
>   ...
> };
> 
> For v3 I can add a comment with a condensed version of this argument in
> icss_iep_cap_cmp_work().

Please do a run with LOCKDEP enabled; I think it should splat with the
mix of plain spin_lock() and spin_lock_irqsave() this patch brings in.

> With this said it should be possible to change this spinlock to a mutex as
> well, since all the possibilities for concurrency happen outside of
> interrupt context. I can add a patch to this series doing that if you
> agree with my reasoning above and find it beneficial. For this some
> comments from TI would also be good to have in case I missed something
> or there is some other factor that I am not aware of.

It looks like most critical sections protected by iep->irq_lock are
already under ptp_clk_mutex protection; AFAICS all except the one
introduced by this patch.

If so, you could acquire that mutex in icss_iep_cap_cmp_work() as well
and remove iep->irq_lock completely.

Cheers,

Paolo
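
For concreteness, the rework suggested above might look roughly like the
sketch below. This is only a sketch, under the assumption that every other
CMP1 access is already serialized by ptp_clk_mutex as discussed; it is not
the code that ended up in v3.

static void icss_iep_cap_cmp_work(struct work_struct *work)
{
	struct icss_iep *iep = container_of(work, struct icss_iep, work);
	const u32 *reg_offs = iep->plat_data->reg_offs;
	struct ptp_clock_event pevent;
	u64 ns, ns_next;

	/* Process context only, so the existing mutex can replace irq_lock. */
	mutex_lock(&iep->ptp_clk_mutex);

	ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ns |= (u64)readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]) << 32;

	/* Arm the next compare event one period ahead. */
	ns_next = ns + iep->period;
	writel(lower_32_bits(ns_next), iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns_next),
		       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);

	mutex_unlock(&iep->ptp_clk_mutex);

	/* ptp_clock_event() needs no driver lock; report outside it. */
	pevent.pps_times.ts_real = ns_to_timespec64(ns);
	pevent.type = PTP_CLOCK_PPSUSR;
	pevent.index = 0;
	ptp_clock_event(iep->ptp_clock, &pevent);
}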
Diogo Ivo June 6, 2024, 3:43 p.m. UTC | #5
On 6/6/24 2:49 PM, Paolo Abeni wrote:
> On Thu, 2024-06-06 at 14:28 +0100, Diogo Ivo wrote:
>> With this said it should be possible to change this spinlock to a mutex as
>> well, since all the possibilities for concurrency happen outside of
>> interrupt context. I can add a patch to this series doing that if you
>> agree with my reasoning above and find it beneficial. For this some
>> comments from TI would also be good to have in case I missed something
>> or there is some other factor that I am not aware of.
> 
> It looks like most critical sections protected by iep->irq_lock are
> already under ptp_clk_mutex protection; AFAICS all except the one
> introduced by this patch.
> 
> If so, you could acquire that mutex in icss_iep_cap_cmp_work() as well
> and remove iep->irq_lock completely.

That is a much better approach, I'll do that and post v3.

Best regards,
Diogo

Patch

diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 3025e9c18970..b076be9c527c 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -17,6 +17,7 @@ 
 #include <linux/timekeeping.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
+#include <linux/workqueue.h>
 
 #include "icss_iep.h"
 
@@ -122,6 +123,7 @@  struct icss_iep {
 	int cap_cmp_irq;
 	u64 period;
 	u32 latch_enable;
+	struct work_struct work;
 };
 
 /**
@@ -571,6 +573,57 @@  static int icss_iep_perout_enable(struct icss_iep *iep,
 	return ret;
 }
 
+static void icss_iep_cap_cmp_work(struct work_struct *work)
+{
+	struct icss_iep *iep = container_of(work, struct icss_iep, work);
+	const u32 *reg_offs = iep->plat_data->reg_offs;
+	struct ptp_clock_event pevent;
+	unsigned int val;
+	u64 ns, ns_next;
+
+	spin_lock(&iep->irq_lock);
+
+	ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
+		val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+		ns |= (u64)val << 32;
+	}
+	/* set next event */
+	ns_next = ns + iep->period;
+	writel(lower_32_bits(ns_next),
+	       iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+		writel(upper_32_bits(ns_next),
+		       iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+
+	pevent.pps_times.ts_real = ns_to_timespec64(ns);
+	pevent.type = PTP_CLOCK_PPSUSR;
+	pevent.index = 0;
+	ptp_clock_event(iep->ptp_clock, &pevent);
+	dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);
+
+	spin_unlock(&iep->irq_lock);
+}
+
+static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
+{
+	struct icss_iep *iep = (struct icss_iep *)dev_id;
+	const u32 *reg_offs = iep->plat_data->reg_offs;
+	unsigned int val;
+
+	val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+	/* The driver only enables CMP1 */
+	if (val & BIT(1)) {
+		/* Clear the event */
+		writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+		if (iep->pps_enabled || iep->perout_enabled)
+			schedule_work(&iep->work);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
 static int icss_iep_pps_enable(struct icss_iep *iep, int on)
 {
 	struct ptp_clock_request rq;
@@ -602,6 +655,8 @@  static int icss_iep_pps_enable(struct icss_iep *iep, int on)
 		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
 	} else {
 		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+		if (iep->cap_cmp_irq)
+			cancel_work_sync(&iep->work);
 	}
 
 	if (!ret)
@@ -777,6 +832,8 @@  int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
 	if (iep->ops && iep->ops->perout_enable) {
 		iep->ptp_info.n_per_out = 1;
 		iep->ptp_info.pps = 1;
+	} else if (iep->cap_cmp_irq) {
+		iep->ptp_info.pps = 1;
 	}
 
 	if (iep->ops && iep->ops->extts_enable)
@@ -817,6 +874,7 @@  static int icss_iep_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct icss_iep *iep;
 	struct clk *iep_clk;
+	int ret, irq;
 
 	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
 	if (!iep)
@@ -827,6 +885,22 @@  static int icss_iep_probe(struct platform_device *pdev)
 	if (IS_ERR(iep->base))
 		return -ENODEV;
 
+	irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
+	if (irq == -EPROBE_DEFER)
+		return irq;
+
+	if (irq > 0) {
+		ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
+				       IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
+		if (ret) {
+			dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
+				 ret);
+		} else {
+			iep->cap_cmp_irq = irq;
+			INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
+		}
+	}
+
 	iep_clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(iep_clk))
 		return PTR_ERR(iep_clk);