
[net-next] net: fec: Convert fec driver to use lock guards

Message ID 20240507090520.284821-1-wei.fang@nxp.com (mailing list archive)
State Superseded
Series [net-next] net: fec: Convert fec driver to use lock guards

Commit Message

Wei Fang May 7, 2024, 9:05 a.m. UTC
Use guard() and scoped_guard() defined in linux/cleanup.h to automate
lock lifetime control in fec driver.

Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
 drivers/net/ethernet/freescale/fec_main.c |  37 ++++----
 drivers/net/ethernet/freescale/fec_ptp.c  | 104 +++++++++-------------
 2 files changed, 58 insertions(+), 83 deletions(-)

Comments

Eric Dumazet May 7, 2024, 10:39 a.m. UTC | #1
On Tue, May 7, 2024 at 11:16 AM Wei Fang <wei.fang@nxp.com> wrote:
>
> Use guard() and scoped_guard() defined in linux/cleanup.h to automate
> lock lifetime control in fec driver.
>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
>

To me, this looks like a nice recipe for future disasters when doing backports,
because I am pretty sure the "goto ..." that assumes the lock is magically
released will fail horribly.

I would use scoped_guard() only for new code.
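
As an illustration of the backport concern (a hypothetical sketch, not code
from this patch): a goto written against the guard-based code relies on the
compiler-inserted unlock, which an older tree does not have.

	/* net-next, after conversion: the goto implicitly drops the mutex */
	scoped_guard(mutex, &fep->ptp_clk_mutex) {
		ret = clk_prepare_enable(fep->clk_ptp);
		if (ret)
			goto failed_clk_ptp;	/* guard unlocks before the jump */
		fep->ptp_clk_on = true;
	}

	/* the same goto, backported into a tree that still uses explicit
	 * locking, silently skips the unlock
	 */
	mutex_lock(&fep->ptp_clk_mutex);
	ret = clk_prepare_enable(fep->clk_ptp);
	if (ret)
		goto failed_clk_ptp;		/* mutex is still held here */
	fep->ptp_clk_on = true;
	mutex_unlock(&fep->ptp_clk_mutex);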
Suman Ghosh May 7, 2024, 11:55 a.m. UTC | #2
> drivers/net/ethernet/freescale/fec_main.c |  37 ++++----
>drivers/net/ethernet/freescale/fec_ptp.c  | 104 +++++++++-------------
> 2 files changed, 58 insertions(+), 83 deletions(-)
>
>diff --git a/drivers/net/ethernet/freescale/fec_main.c
>b/drivers/net/ethernet/freescale/fec_main.c
>index 8bd213da8fb6..5f98c0615115 100644
>--- a/drivers/net/ethernet/freescale/fec_main.c
>+++ b/drivers/net/ethernet/freescale/fec_main.c
>@@ -1397,12 +1397,11 @@ static void
> fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
> 	struct skb_shared_hwtstamps *hwtstamps)  {
>-	unsigned long flags;
> 	u64 ns;
>
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>-	ns = timecounter_cyc2time(&fep->tc, ts);
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>+	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
>+		ns = timecounter_cyc2time(&fep->tc, ts);
>+	}
>
> 	memset(hwtstamps, 0, sizeof(*hwtstamps));
> 	hwtstamps->hwtstamp = ns_to_ktime(ns); @@ -2313,15 +2312,13 @@ static
>int fec_enet_clk_enable(struct net_device *ndev, bool enable)
> 			return ret;
>
> 		if (fep->clk_ptp) {
>-			mutex_lock(&fep->ptp_clk_mutex);
>-			ret = clk_prepare_enable(fep->clk_ptp);
>-			if (ret) {
>-				mutex_unlock(&fep->ptp_clk_mutex);
>-				goto failed_clk_ptp;
>-			} else {
>-				fep->ptp_clk_on = true;
>+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
>+				ret = clk_prepare_enable(fep->clk_ptp);
>+				if (ret)
>+					goto failed_clk_ptp;
>+				else
>+					fep->ptp_clk_on = true;
> 			}
>-			mutex_unlock(&fep->ptp_clk_mutex);
> 		}
>
> 		ret = clk_prepare_enable(fep->clk_ref); @@ -2336,10 +2333,10 @@
>static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
> 	} else {
> 		clk_disable_unprepare(fep->clk_enet_out);
> 		if (fep->clk_ptp) {
>-			mutex_lock(&fep->ptp_clk_mutex);
>-			clk_disable_unprepare(fep->clk_ptp);
>-			fep->ptp_clk_on = false;
>-			mutex_unlock(&fep->ptp_clk_mutex);
>+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
>+				clk_disable_unprepare(fep->clk_ptp);
>+				fep->ptp_clk_on = false;
>+			}
> 		}
> 		clk_disable_unprepare(fep->clk_ref);
> 		clk_disable_unprepare(fep->clk_2x_txclk);
>@@ -2352,10 +2349,10 @@ static int fec_enet_clk_enable(struct net_device
>*ndev, bool enable)
> 		clk_disable_unprepare(fep->clk_ref);
> failed_clk_ref:
> 	if (fep->clk_ptp) {
>-		mutex_lock(&fep->ptp_clk_mutex);
>-		clk_disable_unprepare(fep->clk_ptp);
>-		fep->ptp_clk_on = false;
>-		mutex_unlock(&fep->ptp_clk_mutex);
>+		scoped_guard(mutex, &fep->ptp_clk_mutex) {
[Suman] Hi Wei,
I am new to the use of scoped_guard() and am a bit confused here. Above, "goto failed_clk_ref" is called after a scoped_guard() declaration, and you declare scoped_guard() again inside the goto label failed_clk_ref. Why is this double declaration needed?
>+			clk_disable_unprepare(fep->clk_ptp);
>+			fep->ptp_clk_on = false;
>+		}
> 	}
> failed_clk_ptp:
> 	clk_disable_unprepare(fep->clk_enet_out);
>diff --git a/drivers/net/ethernet/freescale/fec_ptp.c
>b/drivers/net/ethernet/freescale/fec_ptp.c
>index 181d9bfbee22..ed64e077a64a 100644
>--- a/drivers/net/ethernet/freescale/fec_ptp.c
>+++ b/drivers/net/ethernet/freescale/fec_ptp.c
>@@ -99,18 +99,17 @@
>  */
> static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
>{
>-	unsigned long flags;
> 	u32 val, tempval;
> 	struct timespec64 ts;
> 	u64 ns;
>
>-	if (fep->pps_enable == enable)
>-		return 0;
>-
> 	fep->pps_channel = DEFAULT_PPS_CHANNEL;
> 	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
>
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>+	guard(spinlock_irqsave)(&fep->tmreg_lock);
>+
>+	if (fep->pps_enable == enable)
>+		return 0;
>
> 	if (enable) {
> 		/* clear capture or output compare interrupt status if have.
>@@ -195,7 +194,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private
>*fep, uint enable)
> 	}
>
> 	fep->pps_enable = enable;
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>
> 	return 0;
> }
>@@ -204,9 +202,8 @@ static int fec_ptp_pps_perout(struct fec_enet_private
>*fep)  {
> 	u32 compare_val, ptp_hc, temp_val;
> 	u64 curr_time;
>-	unsigned long flags;
>
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>+	guard(spinlock_irqsave)(&fep->tmreg_lock);
>
> 	/* Update time counter */
> 	timecounter_read(&fep->tc);
>@@ -229,7 +226,6 @@ static int fec_ptp_pps_perout(struct fec_enet_private
>*fep)
> 	 */
> 	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
> 		dev_err(&fep->pdev->dev, "Current time is too close to the start
>time!\n");
>-		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
> 		return -1;
> 	}
>
>@@ -257,7 +253,6 @@ static int fec_ptp_pps_perout(struct fec_enet_private
>*fep)
> 	 */
> 	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
> 	fep->next_counter = (fep->next_counter + fep->reload_period) & fep-
>>cc.mask;
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>
> 	return 0;
> }
>@@ -307,13 +302,12 @@ static u64 fec_ptp_read(const struct cyclecounter
>*cc)  void fec_ptp_start_cyclecounter(struct net_device *ndev)  {
> 	struct fec_enet_private *fep = netdev_priv(ndev);
>-	unsigned long flags;
> 	int inc;
>
> 	inc = 1000000000 / fep->cycle_speed;
>
> 	/* grab the ptp lock */
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>+	guard(spinlock_irqsave)(&fep->tmreg_lock);
>
> 	/* 1ns counter */
> 	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); @@ -332,8
>+326,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
>
> 	/* reset the ns time counter */
> 	timecounter_init(&fep->tc, &fep->cc, 0);
>-
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
> }
>
> /**
>@@ -352,7 +344,6 @@ void fec_ptp_start_cyclecounter(struct net_device
>*ndev)  static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long
>scaled_ppm)  {
> 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
>-	unsigned long flags;
> 	int neg_adj = 0;
> 	u32 i, tmp;
> 	u32 corr_inc, corr_period;
>@@ -397,7 +388,7 @@ static int fec_ptp_adjfine(struct ptp_clock_info *ptp,
>long scaled_ppm)
> 	else
> 		corr_ns = fep->ptp_inc + corr_inc;
>
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>+	guard(spinlock_irqsave)(&fep->tmreg_lock);
>
> 	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
> 	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; @@ -407,8 +398,6 @@ static
>int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
> 	/* dummy read to update the timer. */
> 	timecounter_read(&fep->tc);
>
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>-
> 	return 0;
> }
>
>@@ -423,11 +412,9 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp,
>s64 delta)  {
> 	struct fec_enet_private *fep =
> 	    container_of(ptp, struct fec_enet_private, ptp_caps);
>-	unsigned long flags;
>
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>+	guard(spinlock_irqsave)(&fep->tmreg_lock);
> 	timecounter_adjtime(&fep->tc, delta);
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>
> 	return 0;
> }
>@@ -445,18 +432,16 @@ static int fec_ptp_gettime(struct ptp_clock_info
>*ptp, struct timespec64 *ts)
> 	struct fec_enet_private *fep =
> 	    container_of(ptp, struct fec_enet_private, ptp_caps);
> 	u64 ns;
>-	unsigned long flags;
>
>-	mutex_lock(&fep->ptp_clk_mutex);
>-	/* Check the ptp clock */
>-	if (!fep->ptp_clk_on) {
>-		mutex_unlock(&fep->ptp_clk_mutex);
>-		return -EINVAL;
>+	scoped_guard(mutex, &fep->ptp_clk_mutex) {
>+		/* Check the ptp clock */
>+		if (!fep->ptp_clk_on)
>+			return -EINVAL;
>+
>+		scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
>+			ns = timecounter_read(&fep->tc);
>+		}
> 	}
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>-	ns = timecounter_read(&fep->tc);
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>-	mutex_unlock(&fep->ptp_clk_mutex);
>
> 	*ts = ns_to_timespec64(ns);
>
>@@ -478,15 +463,12 @@ static int fec_ptp_settime(struct ptp_clock_info
>*ptp,
> 	    container_of(ptp, struct fec_enet_private, ptp_caps);
>
> 	u64 ns;
>-	unsigned long flags;
> 	u32 counter;
>
>-	mutex_lock(&fep->ptp_clk_mutex);
>+	guard(mutex)(&fep->ptp_clk_mutex);
> 	/* Check the ptp clock */
>-	if (!fep->ptp_clk_on) {
>-		mutex_unlock(&fep->ptp_clk_mutex);
>+	if (!fep->ptp_clk_on)
> 		return -EINVAL;
>-	}
>
> 	ns = timespec64_to_ns(ts);
> 	/* Get the timer value based on timestamp.
>@@ -494,21 +476,18 @@ static int fec_ptp_settime(struct ptp_clock_info
>*ptp,
> 	 */
> 	counter = ns & fep->cc.mask;
>
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>-	writel(counter, fep->hwp + FEC_ATIME);
>-	timecounter_init(&fep->tc, &fep->cc, ns);
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>-	mutex_unlock(&fep->ptp_clk_mutex);
>+	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
>+		writel(counter, fep->hwp + FEC_ATIME);
>+		timecounter_init(&fep->tc, &fep->cc, ns);
>+	}
>+
> 	return 0;
> }
>
> static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
>{
>-	unsigned long flags;
>-
>-	spin_lock_irqsave(&fep->tmreg_lock, flags);
>+	guard(spinlock_irqsave)(&fep->tmreg_lock);
> 	writel(0, fep->hwp + FEC_TCSR(channel));
>-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>
> 	return 0;
> }
>@@ -528,7 +507,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
> 	ktime_t timeout;
> 	struct timespec64 start_time, period;
> 	u64 curr_time, delta, period_ns;
>-	unsigned long flags;
> 	int ret = 0;
>
> 	if (rq->type == PTP_CLK_REQ_PPS) {
>@@ -563,17 +541,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
> 			start_time.tv_nsec = rq->perout.start.nsec;
> 			fep->perout_stime = timespec64_to_ns(&start_time);
>
>-			mutex_lock(&fep->ptp_clk_mutex);
>-			if (!fep->ptp_clk_on) {
>-				dev_err(&fep->pdev->dev, "Error: PTP clock is
>closed!\n");
>-				mutex_unlock(&fep->ptp_clk_mutex);
>-				return -EOPNOTSUPP;
>+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
>+				if (!fep->ptp_clk_on) {
>+					dev_err(&fep->pdev->dev,
>+						"Error: PTP clock is closed!\n");
>+					return -EOPNOTSUPP;
>+				}
>+
>+				scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
>+					/* Read current timestamp */
>+					curr_time = timecounter_read(&fep->tc);
>+				}
> 			}
>-			spin_lock_irqsave(&fep->tmreg_lock, flags);
>-			/* Read current timestamp */
>-			curr_time = timecounter_read(&fep->tc);
>-			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>-			mutex_unlock(&fep->ptp_clk_mutex);
>
> 			/* Calculate time difference */
> 			delta = fep->perout_stime - curr_time; @@ -653,15 +632,14 @@
>static void fec_time_keep(struct work_struct *work)  {
> 	struct delayed_work *dwork = to_delayed_work(work);
> 	struct fec_enet_private *fep = container_of(dwork, struct
>fec_enet_private, time_keep);
>-	unsigned long flags;
>
>-	mutex_lock(&fep->ptp_clk_mutex);
>-	if (fep->ptp_clk_on) {
>-		spin_lock_irqsave(&fep->tmreg_lock, flags);
>-		timecounter_read(&fep->tc);
>-		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
>+	scoped_guard(mutex, &fep->ptp_clk_mutex) {
>+		if (fep->ptp_clk_on) {
>+			scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
>+				timecounter_read(&fep->tc);
>+			}
>+		}
> 	}
>-	mutex_unlock(&fep->ptp_clk_mutex);
>
> 	schedule_delayed_work(&fep->time_keep, HZ);  }
>--
>2.34.1
>
Andrew Lunn May 7, 2024, 2:01 p.m. UTC | #3
On Tue, May 07, 2024 at 05:05:20PM +0800, Wei Fang wrote:
> Use guard() and scoped_guard() defined in linux/cleanup.h to automate
> lock lifetime control in fec driver.

You are probably the first to use these in netdev. Or one of the very
early adopters. As such, you should explain in a bit more detail why
these changes are safe. 

> -	spin_lock_irqsave(&fep->tmreg_lock, flags);
> -	ns = timecounter_cyc2time(&fep->tc, ts);
> -	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
> +	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
> +		ns = timecounter_cyc2time(&fep->tc, ts);
> +	}

This looks fine.

> -			mutex_lock(&fep->ptp_clk_mutex);
> -			ret = clk_prepare_enable(fep->clk_ptp);
> -			if (ret) {
> -				mutex_unlock(&fep->ptp_clk_mutex);
> -				goto failed_clk_ptp;
> -			} else {
> -				fep->ptp_clk_on = true;
> +			scoped_guard(mutex, &fep->ptp_clk_mutex) {
> +				ret = clk_prepare_enable(fep->clk_ptp);
> +				if (ret)
> +					goto failed_clk_ptp;
> +				else
> +					fep->ptp_clk_on = true;
>  			}

As Eric pointed out, it is not obvious what the semantics are
here. You are leaving the scope, so I hope it does not matter that it is
a goto you are using to leave the scope. But a quick search did not find
anything to confirm this. So I would like to see some justification in
the commit message that this is safe.

> +++ b/drivers/net/ethernet/freescale/fec_ptp.c
> @@ -99,18 +99,17 @@
>   */
>  static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
>  {
> -	unsigned long flags;
>  	u32 val, tempval;
>  	struct timespec64 ts;
>  	u64 ns;
>  
> -	if (fep->pps_enable == enable)
> -		return 0;
> -
>  	fep->pps_channel = DEFAULT_PPS_CHANNEL;
>  	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
>  
> -	spin_lock_irqsave(&fep->tmreg_lock, flags);
> +	guard(spinlock_irqsave)(&fep->tmreg_lock);
> +
> +	if (fep->pps_enable == enable)
> +		return 0;

This is not obviously correct. Why has this condition moved?

I also personally don't like guard(). scoped_guard() {} is much easier
to understand.

In order to get my Reviewed-by: you need to drop all the plain guard()
calls. I'm also not sure as a community we want to see changes like
this.

	Andrew
Wei Fang May 8, 2024, 2:41 a.m. UTC | #4
> -----Original Message-----
> From: Eric Dumazet <edumazet@google.com>
> Sent: May 7, 2024 18:40
> To: Wei Fang <wei.fang@nxp.com>
> Cc: davem@davemloft.net; kuba@kernel.org; pabeni@redhat.com; Shenwei
> Wang <shenwei.wang@nxp.com>; Clark Wang <xiaoning.wang@nxp.com>;
> richardcochran@gmail.com; netdev@vger.kernel.org;
> linux-kernel@vger.kernel.org; imx@lists.linux.dev
> Subject: Re: [PATCH net-next] net: fec: Convert fec driver to use lock guards
> 
> On Tue, May 7, 2024 at 11:16 AM Wei Fang <wei.fang@nxp.com> wrote:
> >
> > Use guard() and scoped_guard() defined in linux/cleanup.h to automate
> > lock lifetime control in fec driver.
> >
> > Signed-off-by: Wei Fang <wei.fang@nxp.com>
> >
> 
> To me, this looks like a nice recipe for future disasters when doing backports,
> because I am pretty sure the "goto ..." that assumes the lock is magically
> released will fail horribly.
> 
> I would use scoped_guard() only for new code.

Now that the kernel already supports scope-based resource management,
I think we should actively use this new mechanism. At the very least, the result
would be safer resource management in the kernel and a lot fewer gotos.
Wei Fang May 8, 2024, 2:46 a.m. UTC | #5
> -----Original Message-----
> From: Suman Ghosh <sumang@marvell.com>
> Sent: May 7, 2024 19:55
> To: Wei Fang <wei.fang@nxp.com>; davem@davemloft.net;
> edumazet@google.com; kuba@kernel.org; pabeni@redhat.com; Shenwei
> Wang <shenwei.wang@nxp.com>; Clark Wang <xiaoning.wang@nxp.com>;
> richardcochran@gmail.com; netdev@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org; imx@lists.linux.dev
> Subject: RE: [EXTERNAL] [PATCH net-next] net: fec: Convert fec driver to use
> lock guards
> 
> > drivers/net/ethernet/freescale/fec_main.c |  37 ++++----
> >drivers/net/ethernet/freescale/fec_ptp.c  | 104 +++++++++-------------
> > 2 files changed, 58 insertions(+), 83 deletions(-)
> >
> >diff --git a/drivers/net/ethernet/freescale/fec_main.c
> >b/drivers/net/ethernet/freescale/fec_main.c
> >index 8bd213da8fb6..5f98c0615115 100644
> >--- a/drivers/net/ethernet/freescale/fec_main.c
> >+++ b/drivers/net/ethernet/freescale/fec_main.c
> >@@ -1397,12 +1397,11 @@ static void
> > fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
> > 	struct skb_shared_hwtstamps *hwtstamps)  {
> >-	unsigned long flags;
> > 	u64 ns;
> >
> >-	spin_lock_irqsave(&fep->tmreg_lock, flags);
> >-	ns = timecounter_cyc2time(&fep->tc, ts);
> >-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
> >+	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
> >+		ns = timecounter_cyc2time(&fep->tc, ts);
> >+	}
> >
> > 	memset(hwtstamps, 0, sizeof(*hwtstamps));
> > 	hwtstamps->hwtstamp = ns_to_ktime(ns); @@ -2313,15 +2312,13 @@
> static
> >int fec_enet_clk_enable(struct net_device *ndev, bool enable)
> > 			return ret;
> >
> > 		if (fep->clk_ptp) {
> >-			mutex_lock(&fep->ptp_clk_mutex);
> >-			ret = clk_prepare_enable(fep->clk_ptp);
> >-			if (ret) {
> >-				mutex_unlock(&fep->ptp_clk_mutex);
> >-				goto failed_clk_ptp;
> >-			} else {
> >-				fep->ptp_clk_on = true;
> >+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
> >+				ret = clk_prepare_enable(fep->clk_ptp);
> >+				if (ret)
> >+					goto failed_clk_ptp;
> >+				else
> >+					fep->ptp_clk_on = true;
> > 			}
> >-			mutex_unlock(&fep->ptp_clk_mutex);
> > 		}
> >
> > 		ret = clk_prepare_enable(fep->clk_ref); @@ -2336,10 +2333,10
> @@
> >static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
> > 	} else {
> > 		clk_disable_unprepare(fep->clk_enet_out);
> > 		if (fep->clk_ptp) {
> >-			mutex_lock(&fep->ptp_clk_mutex);
> >-			clk_disable_unprepare(fep->clk_ptp);
> >-			fep->ptp_clk_on = false;
> >-			mutex_unlock(&fep->ptp_clk_mutex);
> >+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
> >+				clk_disable_unprepare(fep->clk_ptp);
> >+				fep->ptp_clk_on = false;
> >+			}
> > 		}
> > 		clk_disable_unprepare(fep->clk_ref);
> > 		clk_disable_unprepare(fep->clk_2x_txclk);
> >@@ -2352,10 +2349,10 @@ static int fec_enet_clk_enable(struct
> net_device
> >*ndev, bool enable)
> > 		clk_disable_unprepare(fep->clk_ref);
> > failed_clk_ref:
> > 	if (fep->clk_ptp) {
> >-		mutex_lock(&fep->ptp_clk_mutex);
> >-		clk_disable_unprepare(fep->clk_ptp);
> >-		fep->ptp_clk_on = false;
> >-		mutex_unlock(&fep->ptp_clk_mutex);
> >+		scoped_guard(mutex, &fep->ptp_clk_mutex) {
> [Suman] Hi Wei,
> I am new to the use of scoped_guard() and am a bit confused here. Above,
> "goto failed_clk_ref" is called after a scoped_guard() declaration, and you
> declare scoped_guard() again inside the goto label failed_clk_ref. Why is
> this double declaration needed?

The lock will be released when it goes out of scope. And the second scoped_guard() is
not nested inside the first scoped_guard().
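
A minimal standalone sketch of that control flow (hypothetical helpers, not
the fec code):

	scoped_guard(mutex, &lock) {		/* first critical section */
		if (do_work())			/* hypothetical helper */
			goto err;		/* leaving the scope releases 'lock' */
	}
	return 0;

err:
	scoped_guard(mutex, &lock) {		/* second, independent critical section */
		undo_work();			/* 'lock' is acquired again here */
	}
	return -EIO;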

> >+			clk_disable_unprepare(fep->clk_ptp);
> >+			fep->ptp_clk_on = false;
> >+		}
> > 	}
> > failed_clk_ptp:
> > 	clk_disable_unprepare(fep->clk_enet_out);
Wei Fang May 8, 2024, 3:29 a.m. UTC | #6
> -----Original Message-----
> From: Andrew Lunn <andrew@lunn.ch>
> Sent: May 7, 2024 22:01
> To: Wei Fang <wei.fang@nxp.com>
> Cc: davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; Shenwei Wang <shenwei.wang@nxp.com>; Clark Wang
> <xiaoning.wang@nxp.com>; richardcochran@gmail.com;
> netdev@vger.kernel.org; linux-kernel@vger.kernel.org; imx@lists.linux.dev
> Subject: Re: [PATCH net-next] net: fec: Convert fec driver to use lock guards
> 
> On Tue, May 07, 2024 at 05:05:20PM +0800, Wei Fang wrote:
> > Use guard() and scoped_guard() defined in linux/cleanup.h to automate
> > lock lifetime control in fec driver.
> 
> You are probably the first to use these in netdev. Or one of the very
> early adopters. As such, you should explain in a bit more detail why
> these changes are safe.
>
Okay, I can add more detail in the commit message.

> > -	spin_lock_irqsave(&fep->tmreg_lock, flags);
> > -	ns = timecounter_cyc2time(&fep->tc, ts);
> > -	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
> > +	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
> > +		ns = timecounter_cyc2time(&fep->tc, ts);
> > +	}
> 
> This looks fine.
> 
> > -			mutex_lock(&fep->ptp_clk_mutex);
> > -			ret = clk_prepare_enable(fep->clk_ptp);
> > -			if (ret) {
> > -				mutex_unlock(&fep->ptp_clk_mutex);
> > -				goto failed_clk_ptp;
> > -			} else {
> > -				fep->ptp_clk_on = true;
> > +			scoped_guard(mutex, &fep->ptp_clk_mutex) {
> > +				ret = clk_prepare_enable(fep->clk_ptp);
> > +				if (ret)
> > +					goto failed_clk_ptp;
> > +				else
> > +					fep->ptp_clk_on = true;
> >  			}
> 
> As Eric pointed out, it is not obvious what the semantics are
> here. You are leaving the scope, so I hope it does not matter that it is
> a goto you are using to leave the scope. But a quick search did not find
> anything to confirm this. So I would like to see some justification in
> the commit message that this is safe.
>
According to the documentation of the cleanup attribute in gcc [1] and clang [2],
the cleanup attribute runs a function when the variable goes out of scope, so
the lock will be released when leaving the scope, even via a goto.
In addition, I have seen goto statements used inside scoped_guard() in
the gpiolib driver [3].

[1] https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
[2] https://clang.llvm.org/docs/AttributeReference.html#cleanup
[3] https://elixir.bootlin.com/linux/v6.9-rc7/source/drivers/gpio/gpiolib.c#L930
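
Roughly, a simplified standalone sketch of that mechanism (made-up helpers,
not the actual cleanup.h macros):

	static struct mutex *lock_it(struct mutex *m)
	{
		mutex_lock(m);
		return m;
	}

	static void unlock_it(struct mutex **m)
	{
		/* runs whenever the guarded variable goes out of scope */
		mutex_unlock(*m);
	}

	...
	{
		struct mutex *g __attribute__((cleanup(unlock_it))) =
			lock_it(&fep->ptp_clk_mutex);

		if (!fep->ptp_clk_on)
			goto out;	/* unlock_it(&g) runs before the jump */
		/* ... */
	}
out:
	...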

> > +++ b/drivers/net/ethernet/freescale/fec_ptp.c
> > @@ -99,18 +99,17 @@
> >   */
> >  static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
> >  {
> > -	unsigned long flags;
> >  	u32 val, tempval;
> >  	struct timespec64 ts;
> >  	u64 ns;
> >
> > -	if (fep->pps_enable == enable)
> > -		return 0;
> > -
> >  	fep->pps_channel = DEFAULT_PPS_CHANNEL;
> >  	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
> >
> > -	spin_lock_irqsave(&fep->tmreg_lock, flags);
> > +	guard(spinlock_irqsave)(&fep->tmreg_lock);
> > +
> > +	if (fep->pps_enable == enable)
> > +		return 0;
> 
> This is not obviously correct. Why has this condition moved?
> 
As you can see, the assignment of 'pps_enable' is protected by 'tmreg_lock',
but the read of 'pps_enable' was not protected by the lock, so the Coverity
tool complains with a LOCK EVASION warning: a data race could occur when
running in a multithreaded environment. Of course, this data race is almost
impossible in practice, so I fixed it along the way, because I don't really
want to fix it through another patch, unless you insist on doing so.
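
The interleaving the tool warns about, roughly (illustrative, abridged from
the patch):

	/* before: the check runs outside tmreg_lock */
	if (fep->pps_enable == enable)		/* thread A may read a stale value ... */
		return 0;
	...
	spin_lock_irqsave(&fep->tmreg_lock, flags);
	...
	fep->pps_enable = enable;		/* ... while thread B updates it under the lock */
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	/* after: both the check and the update happen under tmreg_lock */
	guard(spinlock_irqsave)(&fep->tmreg_lock);

	if (fep->pps_enable == enable)
		return 0;
	...
	fep->pps_enable = enable;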

> I also personally don't like guard(). scoped_guard() {} is much easier
> to understand.
> 
If the lock needs to be held from the time it is acquired until the function
returns, I think guard() is simpler. Of course, you may think scoped_guard()
is more reasonable based on other considerations.
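
For comparison, the same critical section written both ways (sketch based on
fec_ptp_pps_disable()):

	/* guard(): the lock is held from here until the function returns */
	guard(spinlock_irqsave)(&fep->tmreg_lock);
	writel(0, fep->hwp + FEC_TCSR(channel));

	return 0;

	/* scoped_guard(): the extent of the critical section is spelled out */
	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
		writel(0, fep->hwp + FEC_TCSR(channel));
	}

	return 0;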

> In order to get my Reviewed-by: you need to drop all the plain guard()
> calls. I'm also not sure as a community we want to see changes like
> this.
> 
I did this because I see more and more drivers converting to scope-based
resource management, not just for locks but also for memory and other
resources. I think the community should actively embrace this new mechanism.
Markus Elfring June 23, 2024, 8:22 a.m. UTC | #7
> > > +++ b/drivers/net/ethernet/freescale/fec_ptp.c
> > > @@ -99,18 +99,17 @@
> > >   */
> > >  static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
> > >  {
> > > -	unsigned long flags;
> > >  	u32 val, tempval;
> > >  	struct timespec64 ts;
> > >  	u64 ns;
> > >
> > > -	if (fep->pps_enable == enable)
> > > -		return 0;
> > > -
> > >  	fep->pps_channel = DEFAULT_PPS_CHANNEL;
> > >  	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
> > >
> > > -	spin_lock_irqsave(&fep->tmreg_lock, flags);
> > > +	guard(spinlock_irqsave)(&fep->tmreg_lock);
> > > +
> > > +	if (fep->pps_enable == enable)
> > > +		return 0;
> >
> > This is not obviously correct. Why has this condition moved?
> >
> As you can see, the assignment of 'pps_enable' is protected by 'tmreg_lock',
> but the read of 'pps_enable' was not protected by the lock, so the Coverity
> tool complains with a LOCK EVASION warning: a data race could occur when
> running in a multithreaded environment.

Should such information trigger the addition of any corresponding tags
(like “Fixes” and “Cc”)?


>                                        Of course, this data race is almost
> impossible in practice, so I fixed it along the way, because I don't really
> want to fix it through another patch, unless you insist on doing so.

Would you like to take the known advice “Solve only one problem per patch”
better into account?
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/process/submitting-patches.rst?h=v6.10-rc4#n81

Please take another look at further approaches for the presentation of
similar “change combinations”.

Regards,
Markus

Patch

diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8bd213da8fb6..5f98c0615115 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1397,12 +1397,11 @@  static void
 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 	struct skb_shared_hwtstamps *hwtstamps)
 {
-	unsigned long flags;
 	u64 ns;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
-	ns = timecounter_cyc2time(&fep->tc, ts);
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
+		ns = timecounter_cyc2time(&fep->tc, ts);
+	}
 
 	memset(hwtstamps, 0, sizeof(*hwtstamps));
 	hwtstamps->hwtstamp = ns_to_ktime(ns);
@@ -2313,15 +2312,13 @@  static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 			return ret;
 
 		if (fep->clk_ptp) {
-			mutex_lock(&fep->ptp_clk_mutex);
-			ret = clk_prepare_enable(fep->clk_ptp);
-			if (ret) {
-				mutex_unlock(&fep->ptp_clk_mutex);
-				goto failed_clk_ptp;
-			} else {
-				fep->ptp_clk_on = true;
+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
+				ret = clk_prepare_enable(fep->clk_ptp);
+				if (ret)
+					goto failed_clk_ptp;
+				else
+					fep->ptp_clk_on = true;
 			}
-			mutex_unlock(&fep->ptp_clk_mutex);
 		}
 
 		ret = clk_prepare_enable(fep->clk_ref);
@@ -2336,10 +2333,10 @@  static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 	} else {
 		clk_disable_unprepare(fep->clk_enet_out);
 		if (fep->clk_ptp) {
-			mutex_lock(&fep->ptp_clk_mutex);
-			clk_disable_unprepare(fep->clk_ptp);
-			fep->ptp_clk_on = false;
-			mutex_unlock(&fep->ptp_clk_mutex);
+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
+				clk_disable_unprepare(fep->clk_ptp);
+				fep->ptp_clk_on = false;
+			}
 		}
 		clk_disable_unprepare(fep->clk_ref);
 		clk_disable_unprepare(fep->clk_2x_txclk);
@@ -2352,10 +2349,10 @@  static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 		clk_disable_unprepare(fep->clk_ref);
 failed_clk_ref:
 	if (fep->clk_ptp) {
-		mutex_lock(&fep->ptp_clk_mutex);
-		clk_disable_unprepare(fep->clk_ptp);
-		fep->ptp_clk_on = false;
-		mutex_unlock(&fep->ptp_clk_mutex);
+		scoped_guard(mutex, &fep->ptp_clk_mutex) {
+			clk_disable_unprepare(fep->clk_ptp);
+			fep->ptp_clk_on = false;
+		}
 	}
 failed_clk_ptp:
 	clk_disable_unprepare(fep->clk_enet_out);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 181d9bfbee22..ed64e077a64a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -99,18 +99,17 @@ 
  */
 static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
 {
-	unsigned long flags;
 	u32 val, tempval;
 	struct timespec64 ts;
 	u64 ns;
 
-	if (fep->pps_enable == enable)
-		return 0;
-
 	fep->pps_channel = DEFAULT_PPS_CHANNEL;
 	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	guard(spinlock_irqsave)(&fep->tmreg_lock);
+
+	if (fep->pps_enable == enable)
+		return 0;
 
 	if (enable) {
 		/* clear capture or output compare interrupt status if have.
@@ -195,7 +194,6 @@  static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
 	}
 
 	fep->pps_enable = enable;
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
 	return 0;
 }
@@ -204,9 +202,8 @@  static int fec_ptp_pps_perout(struct fec_enet_private *fep)
 {
 	u32 compare_val, ptp_hc, temp_val;
 	u64 curr_time;
-	unsigned long flags;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	guard(spinlock_irqsave)(&fep->tmreg_lock);
 
 	/* Update time counter */
 	timecounter_read(&fep->tc);
@@ -229,7 +226,6 @@  static int fec_ptp_pps_perout(struct fec_enet_private *fep)
 	 */
 	if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
 		dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
-		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 		return -1;
 	}
 
@@ -257,7 +253,6 @@  static int fec_ptp_pps_perout(struct fec_enet_private *fep)
 	 */
 	writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
 	fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
 	return 0;
 }
@@ -307,13 +302,12 @@  static u64 fec_ptp_read(const struct cyclecounter *cc)
 void fec_ptp_start_cyclecounter(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
-	unsigned long flags;
 	int inc;
 
 	inc = 1000000000 / fep->cycle_speed;
 
 	/* grab the ptp lock */
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	guard(spinlock_irqsave)(&fep->tmreg_lock);
 
 	/* 1ns counter */
 	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
@@ -332,8 +326,6 @@  void fec_ptp_start_cyclecounter(struct net_device *ndev)
 
 	/* reset the ns time counter */
 	timecounter_init(&fep->tc, &fep->cc, 0);
-
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 }
 
 /**
@@ -352,7 +344,6 @@  void fec_ptp_start_cyclecounter(struct net_device *ndev)
 static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 {
 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
-	unsigned long flags;
 	int neg_adj = 0;
 	u32 i, tmp;
 	u32 corr_inc, corr_period;
@@ -397,7 +388,7 @@  static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 	else
 		corr_ns = fep->ptp_inc + corr_inc;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	guard(spinlock_irqsave)(&fep->tmreg_lock);
 
 	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
 	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
@@ -407,8 +398,6 @@  static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 	/* dummy read to update the timer. */
 	timecounter_read(&fep->tc);
 
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-
 	return 0;
 }
 
@@ -423,11 +412,9 @@  static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct fec_enet_private *fep =
 	    container_of(ptp, struct fec_enet_private, ptp_caps);
-	unsigned long flags;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	guard(spinlock_irqsave)(&fep->tmreg_lock);
 	timecounter_adjtime(&fep->tc, delta);
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
 	return 0;
 }
@@ -445,18 +432,16 @@  static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 	struct fec_enet_private *fep =
 	    container_of(ptp, struct fec_enet_private, ptp_caps);
 	u64 ns;
-	unsigned long flags;
 
-	mutex_lock(&fep->ptp_clk_mutex);
-	/* Check the ptp clock */
-	if (!fep->ptp_clk_on) {
-		mutex_unlock(&fep->ptp_clk_mutex);
-		return -EINVAL;
+	scoped_guard(mutex, &fep->ptp_clk_mutex) {
+		/* Check the ptp clock */
+		if (!fep->ptp_clk_on)
+			return -EINVAL;
+
+		scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
+			ns = timecounter_read(&fep->tc);
+		}
 	}
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
-	ns = timecounter_read(&fep->tc);
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-	mutex_unlock(&fep->ptp_clk_mutex);
 
 	*ts = ns_to_timespec64(ns);
 
@@ -478,15 +463,12 @@  static int fec_ptp_settime(struct ptp_clock_info *ptp,
 	    container_of(ptp, struct fec_enet_private, ptp_caps);
 
 	u64 ns;
-	unsigned long flags;
 	u32 counter;
 
-	mutex_lock(&fep->ptp_clk_mutex);
+	guard(mutex)(&fep->ptp_clk_mutex);
 	/* Check the ptp clock */
-	if (!fep->ptp_clk_on) {
-		mutex_unlock(&fep->ptp_clk_mutex);
+	if (!fep->ptp_clk_on)
 		return -EINVAL;
-	}
 
 	ns = timespec64_to_ns(ts);
 	/* Get the timer value based on timestamp.
@@ -494,21 +476,18 @@  static int fec_ptp_settime(struct ptp_clock_info *ptp,
 	 */
 	counter = ns & fep->cc.mask;
 
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
-	writel(counter, fep->hwp + FEC_ATIME);
-	timecounter_init(&fep->tc, &fep->cc, ns);
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-	mutex_unlock(&fep->ptp_clk_mutex);
+	scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
+		writel(counter, fep->hwp + FEC_ATIME);
+		timecounter_init(&fep->tc, &fep->cc, ns);
+	}
+
 	return 0;
 }
 
 static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	guard(spinlock_irqsave)(&fep->tmreg_lock);
 	writel(0, fep->hwp + FEC_TCSR(channel));
-	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 
 	return 0;
 }
@@ -528,7 +507,6 @@  static int fec_ptp_enable(struct ptp_clock_info *ptp,
 	ktime_t timeout;
 	struct timespec64 start_time, period;
 	u64 curr_time, delta, period_ns;
-	unsigned long flags;
 	int ret = 0;
 
 	if (rq->type == PTP_CLK_REQ_PPS) {
@@ -563,17 +541,18 @@  static int fec_ptp_enable(struct ptp_clock_info *ptp,
 			start_time.tv_nsec = rq->perout.start.nsec;
 			fep->perout_stime = timespec64_to_ns(&start_time);
 
-			mutex_lock(&fep->ptp_clk_mutex);
-			if (!fep->ptp_clk_on) {
-				dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
-				mutex_unlock(&fep->ptp_clk_mutex);
-				return -EOPNOTSUPP;
+			scoped_guard(mutex, &fep->ptp_clk_mutex) {
+				if (!fep->ptp_clk_on) {
+					dev_err(&fep->pdev->dev,
+						"Error: PTP clock is closed!\n");
+					return -EOPNOTSUPP;
+				}
+
+				scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
+					/* Read current timestamp */
+					curr_time = timecounter_read(&fep->tc);
+				}
 			}
-			spin_lock_irqsave(&fep->tmreg_lock, flags);
-			/* Read current timestamp */
-			curr_time = timecounter_read(&fep->tc);
-			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-			mutex_unlock(&fep->ptp_clk_mutex);
 
 			/* Calculate time difference */
 			delta = fep->perout_stime - curr_time;
@@ -653,15 +632,14 @@  static void fec_time_keep(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
-	unsigned long flags;
 
-	mutex_lock(&fep->ptp_clk_mutex);
-	if (fep->ptp_clk_on) {
-		spin_lock_irqsave(&fep->tmreg_lock, flags);
-		timecounter_read(&fep->tc);
-		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+	scoped_guard(mutex, &fep->ptp_clk_mutex) {
+		if (fep->ptp_clk_on) {
+			scoped_guard(spinlock_irqsave, &fep->tmreg_lock) {
+				timecounter_read(&fep->tc);
+			}
+		}
 	}
-	mutex_unlock(&fep->ptp_clk_mutex);
 
 	schedule_delayed_work(&fep->time_keep, HZ);
 }