[net-next,09/14] net_sched: sch_fq_codel: implement lockless fq_codel_dump()

Message ID 20240415132054.3822230-10-edumazet@google.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series net_sched: first series for RTNL-less qdisc dumps

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 926 this patch: 926
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 7 of 7 maintainers
netdev/build_clang success Errors and warnings before: 937 this patch: 937
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 937 this patch: 937
netdev/checkpatch warning WARNING: line length of 83 exceeds 80 columns; WARNING: line length of 86 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-04-18--00-00 (tests: 961)

Commit Message

Eric Dumazet April 15, 2024, 1:20 p.m. UTC
Instead of relying on RTNL, fq_codel_dump() can use READ_ONCE()
annotations, paired with WRITE_ONCE() ones in fq_codel_change().

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 net/sched/sch_fq_codel.c | 57 ++++++++++++++++++++++++----------------
 1 file changed, 35 insertions(+), 22 deletions(-)
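
For readers new to the pattern, a minimal userspace sketch of the pairing
this patch applies is below; the macros are volatile-based stand-ins for the
kernel's READ_ONCE()/WRITE_ONCE(), and the struct, fields and functions are
illustrative, with change() playing the role of fq_codel_change() (writer)
and dump() that of fq_codel_dump() (lockless reader):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel macros: a volatile access forbids the
 * compiler from tearing, fusing, or re-issuing the load/store. */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

struct params {
	uint32_t quantum;
	uint32_t memory_limit;
};

/* Writer side, akin to fq_codel_change(): publish each field with
 * WRITE_ONCE() so a concurrent reader sees a whole value. */
static void change(struct params *q, uint32_t quantum)
{
	WRITE_ONCE(q->quantum, quantum);
}

/* Reader side, akin to fq_codel_dump(): snapshot each field with
 * READ_ONCE() instead of relying on RTNL for mutual exclusion. */
static void dump(struct params *q)
{
	printf("quantum=%u memory_limit=%u\n",
	       READ_ONCE(q->quantum), READ_ONCE(q->memory_limit));
}

int main(void)
{
	struct params q = { .quantum = 1514, .memory_limit = 32 << 20 };

	change(&q, 3028);
	dump(&q);
	return 0;
}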

Comments

Simon Horman April 17, 2024, 5:07 p.m. UTC | #1
On Mon, Apr 15, 2024 at 01:20:49PM +0000, Eric Dumazet wrote:
> Instead of relying on RTNL, fq_codel_dump() can use READ_ONCE()
> annotations, paired with WRITE_ONCE() ones in fq_codel_change().
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> ---
>  net/sched/sch_fq_codel.c | 57 ++++++++++++++++++++++++----------------
>  1 file changed, 35 insertions(+), 22 deletions(-)
> 
> diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c

...

> @@ -529,30 +539,33 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
>  		goto nla_put_failure;
>  
>  	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
> -			codel_time_to_us(q->cparams.target)) ||
> +			codel_time_to_us(READ_ONCE(q->cparams.target))) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
> -			sch->limit) ||
> +			READ_ONCE(sch->limit)) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
> -			codel_time_to_us(q->cparams.interval)) ||
> +			codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
> -			q->cparams.ecn) ||
> +			READ_ONCE(q->cparams.ecn)) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
> -			q->quantum) ||
> +			READ_ONCE(q->quantum)) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
> -			q->drop_batch_size) ||
> +			READ_ONCE(q->drop_batch_size)) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
> -			q->memory_limit) ||
> +			READ_ONCE(q->memory_limit)) ||
>  	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
> -			q->flows_cnt))
> +			READ_ONCE(q->flows_cnt)))

Hi Eric,

I think you missed the corresponding update for q->flows_cnt
in fq_codel_change().

>  		goto nla_put_failure;
>  
> -	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
> +	ce_threshold = READ_ONCE(q->cparams.ce_threshold);
> +	if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
>  		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
> -				codel_time_to_us(q->cparams.ce_threshold)))
> +				codel_time_to_us(ce_threshold)))
>  			goto nla_put_failure;
> -		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
> +		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
> +			       READ_ONCE(q->cparams.ce_threshold_selector)))
>  			goto nla_put_failure;
> -		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
> +		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
> +			       READ_ONCE(q->cparams.ce_threshold_mask)))
>  			goto nla_put_failure;
>  	}
Eric Dumazet April 17, 2024, 5:14 p.m. UTC | #2
On Wed, Apr 17, 2024 at 7:07 PM Simon Horman <horms@kernel.org> wrote:
>
> On Mon, Apr 15, 2024 at 01:20:49PM +0000, Eric Dumazet wrote:
> > Instead of relying on RTNL, fq_codel_dump() can use READ_ONCE()
> > annotations, paired with WRITE_ONCE() ones in fq_codel_change().
> >
> > Signed-off-by: Eric Dumazet <edumazet@google.com>
> > ---
> >  net/sched/sch_fq_codel.c | 57 ++++++++++++++++++++++++----------------
> >  1 file changed, 35 insertions(+), 22 deletions(-)
> >
> > diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
>
> ...
>
> > @@ -529,30 +539,33 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
> >               goto nla_put_failure;
> >
> >       if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
> > -                     codel_time_to_us(q->cparams.target)) ||
> > +                     codel_time_to_us(READ_ONCE(q->cparams.target))) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
> > -                     sch->limit) ||
> > +                     READ_ONCE(sch->limit)) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
> > -                     codel_time_to_us(q->cparams.interval)) ||
> > +                     codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_ECN,
> > -                     q->cparams.ecn) ||
> > +                     READ_ONCE(q->cparams.ecn)) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
> > -                     q->quantum) ||
> > +                     READ_ONCE(q->quantum)) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
> > -                     q->drop_batch_size) ||
> > +                     READ_ONCE(q->drop_batch_size)) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
> > -                     q->memory_limit) ||
> > +                     READ_ONCE(q->memory_limit)) ||
> >           nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
> > -                     q->flows_cnt))
> > +                     READ_ONCE(q->flows_cnt)))
>
> Hi Eric,
>
> I think you missed the corresponding update for q->flows_cnt
> in fq_codel_change().

q->flows_cnt is set at init time only; it cannot change yet.

Blindly using READ_ONCE() in a dump seems like good hygiene;
it is not needed yet, but it does no harm.
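
To make the "does no harm" point concrete: on a naturally aligned 32-bit
field, READ_ONCE() compiles to the same single load a plain access would;
the volatile cast only stops the compiler from splitting, merging, or
re-issuing it. A userspace sketch (stand-in macro and illustrative names,
not the kernel code):

#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))

/* Set once at init time in the real qdisc, never changed afterwards. */
static unsigned int flows_cnt = 1024;

unsigned int dump_flows_cnt(void)
{
	/* Same single load a plain read would be, but the access is now
	 * documented as lockless and stays correct if a future change
	 * makes flows_cnt writable at runtime. */
	return READ_ONCE(flows_cnt);
}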
Simon Horman April 17, 2024, 5:22 p.m. UTC | #3
On Wed, Apr 17, 2024 at 07:14:10PM +0200, Eric Dumazet wrote:
> On Wed, Apr 17, 2024 at 7:07 PM Simon Horman <horms@kernel.org> wrote:
> >
> > On Mon, Apr 15, 2024 at 01:20:49PM +0000, Eric Dumazet wrote:
> > > Instead of relying on RTNL, fq_codel_dump() can use READ_ONCE()
> > > annotations, paired with WRITE_ONCE() ones in fq_codel_change().
> > >
> > > Signed-off-by: Eric Dumazet <edumazet@google.com>
> > > ---
> > >  net/sched/sch_fq_codel.c | 57 ++++++++++++++++++++++++----------------
> > >  1 file changed, 35 insertions(+), 22 deletions(-)
> > >
> > > diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
> >
> > ...
> >
> > Hi Eric,
> >
> > I think you missed the corresponding update for q->flows_cnt
> > in fq_codel_change().
> 
> q->flows_cnt is set at init time only; it cannot change yet.

Sorry, I missed that important detail.

> Blindly using READ_ONCE() in a dump seems like good hygiene;
> it is not needed yet, but it does no harm.

Thanks, understood.

Reviewed-by: Simon Horman <horms@kernel.org>
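
One detail in the diff below deserves a closer look: fq_codel_dump() now
reads ce_threshold once into a local and uses that single snapshot for both
the enabled-check and the netlink attribute, so a concurrent
fq_codel_change() cannot slip a different value in between the two uses.
A minimal userspace sketch of this check-then-use pattern (the macro and
the CE_DISABLED stand-in are illustrative, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))

/* Stand-in for CODEL_DISABLED_THRESHOLD. */
#define CE_DISABLED	UINT32_MAX

static uint32_t ce_threshold_shared = 5000;	/* may be rewritten concurrently */

static void dump_ce(void)
{
	/* Snapshot once: the value tested is the value emitted. */
	uint32_t ce_threshold = READ_ONCE(ce_threshold_shared);

	if (ce_threshold != CE_DISABLED)
		printf("ce_threshold=%u\n", ce_threshold);
}

int main(void)
{
	dump_ce();
	return 0;
}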

Patch

diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 79f9d6de6c852268fa293f8067d029d385b54976..4f908c11ba9528f8f9f3af6752ff10005d6f6511 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -396,40 +396,49 @@  static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_FQ_CODEL_TARGET]) {
 		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
 
-		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->cparams.target,
+			   (target * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
 		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
 
-		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->cparams.ce_threshold,
+			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
-		q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
+		WRITE_ONCE(q->cparams.ce_threshold_selector,
+			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]));
 	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
-		q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);
+		WRITE_ONCE(q->cparams.ce_threshold_mask,
+			   nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]));
 
 	if (tb[TCA_FQ_CODEL_INTERVAL]) {
 		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
 
-		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
+		WRITE_ONCE(q->cparams.interval,
+			   (interval * NSEC_PER_USEC) >> CODEL_SHIFT);
 	}
 
 	if (tb[TCA_FQ_CODEL_LIMIT])
-		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
+		WRITE_ONCE(sch->limit,
+			   nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]));
 
 	if (tb[TCA_FQ_CODEL_ECN])
-		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
+		WRITE_ONCE(q->cparams.ecn,
+			   !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]));
 
 	if (quantum)
-		q->quantum = quantum;
+		WRITE_ONCE(q->quantum, quantum);
 
 	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
-		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+		WRITE_ONCE(q->drop_batch_size,
+			   max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])));
 
 	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
-		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+		WRITE_ONCE(q->memory_limit,
+			   min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT])));
 
 	while (sch->q.qlen > sch->limit ||
 	       q->memory_usage > q->memory_limit) {
@@ -522,6 +531,7 @@  static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	codel_time_t ce_threshold;
 	struct nlattr *opts;
 
 	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -529,30 +539,33 @@  static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
-			codel_time_to_us(q->cparams.target)) ||
+			codel_time_to_us(READ_ONCE(q->cparams.target))) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
-			sch->limit) ||
+			READ_ONCE(sch->limit)) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
-			codel_time_to_us(q->cparams.interval)) ||
+			codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
-			q->cparams.ecn) ||
+			READ_ONCE(q->cparams.ecn)) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
-			q->quantum) ||
+			READ_ONCE(q->quantum)) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
-			q->drop_batch_size) ||
+			READ_ONCE(q->drop_batch_size)) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
-			q->memory_limit) ||
+			READ_ONCE(q->memory_limit)) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
-			q->flows_cnt))
+			READ_ONCE(q->flows_cnt)))
 		goto nla_put_failure;
 
-	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
+	ce_threshold = READ_ONCE(q->cparams.ce_threshold);
+	if (ce_threshold != CODEL_DISABLED_THRESHOLD) {
 		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
-				codel_time_to_us(q->cparams.ce_threshold)))
+				codel_time_to_us(ce_threshold)))
 			goto nla_put_failure;
-		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
+		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR,
+			       READ_ONCE(q->cparams.ce_threshold_selector)))
 			goto nla_put_failure;
-		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
+		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK,
+			       READ_ONCE(q->cparams.ce_threshold_mask)))
 			goto nla_put_failure;
 	}