diff mbox series

[v1,net-next,2/6] af_unix: Save the number of loops in inflight graph.

Message ID 20240503223150.6035-3-kuniyu@amazon.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series af_unix: GC cleanup and optimisation | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 928 this patch: 928
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 938 this patch: 938
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 939 this patch: 939
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 60 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-05-05--03-00 (tests: 1003)

Commit Message

Kuniyuki Iwashima May 3, 2024, 10:31 p.m. UTC
unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
can make unix_graph_maybe_cyclic false when all SCC are cleaned up.

If we count the number of loops in the graph during Tarjan's algorithm,
we need not call unix_scc_cyclic() in unix_walk_scc_fast().

Instead, we can just decrement the number when calling unix_collect_skb()
and update unix_graph_maybe_cyclic based on the count.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
---
 net/unix/garbage.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

Comments

Paolo Abeni May 7, 2024, 1:54 p.m. UTC | #1
On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
> 
> If we count the number of loops in the graph during Tarjan's algorithm,
> we need not call unix_scc_cyclic() in unix_walk_scc_fast().
> 
> Instead, we can just decrement the number when calling unix_collect_skb()
> and update unix_graph_maybe_cyclic based on the count.
> 
> Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> ---
>  net/unix/garbage.c | 19 +++++++++++--------
>  1 file changed, 11 insertions(+), 8 deletions(-)
> 
> diff --git a/net/unix/garbage.c b/net/unix/garbage.c
> index 1f8b8cdfcdc8..7ffb80dd422c 100644
> --- a/net/unix/garbage.c
> +++ b/net/unix/garbage.c
> @@ -405,6 +405,7 @@ static bool unix_scc_cyclic(struct list_head *scc)
>  
>  static LIST_HEAD(unix_visited_vertices);
>  static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
> +static unsigned long unix_graph_circles;
>  
>  static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
>  			    struct sk_buff_head *hitlist)
> @@ -494,8 +495,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
>  
>  		if (scc_dead)
>  			unix_collect_skb(&scc, hitlist);
> -		else if (!unix_graph_maybe_cyclic)
> -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> +		else if (unix_scc_cyclic(&scc))
> +			unix_graph_circles++;
>  
>  		list_del(&scc);
>  	}
> @@ -509,7 +510,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
>  {
>  	unsigned long last_index = UNIX_VERTEX_INDEX_START;
>  
> -	unix_graph_maybe_cyclic = false;
> +	unix_graph_circles = 0;
>  
>  	/* Visit every vertex exactly once.
>  	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
> @@ -524,13 +525,12 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
>  	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
>  	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
>  
> +	unix_graph_maybe_cyclic = !!unix_graph_circles;
>  	unix_graph_grouped = true;
>  }
>  
>  static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
>  {
> -	unix_graph_maybe_cyclic = false;
> -
>  	while (!list_empty(&unix_unvisited_vertices)) {
>  		struct unix_vertex *vertex;
>  		struct list_head scc;
> @@ -546,15 +546,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
>  				scc_dead = unix_vertex_dead(vertex);
>  		}
>  
> -		if (scc_dead)
> +		if (scc_dead) {
>  			unix_collect_skb(&scc, hitlist);
> -		else if (!unix_graph_maybe_cyclic)
> -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> +			unix_graph_circles--;

Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?

I find this patch a little scary - meaning I can't understand it
fully. I'm wondering if it would make any sense to postpone this patch
to the next cycle?

Thanks,

Paolo
Kuniyuki Iwashima May 7, 2024, 4:11 p.m. UTC | #2
From: Paolo Abeni <pabeni@redhat.com>
Date: Tue, 07 May 2024 15:54:33 +0200
> On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> > unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> > can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
> > 
> > If we count the number of loops in the graph during Tarjan's algorithm,
> > we need not call unix_scc_cyclic() in unix_walk_scc_fast().
> > 
> > Instead, we can just decrement the number when calling unix_collect_skb()
> > and update unix_graph_maybe_cyclic based on the count.
> > 
> > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > ---
> >  net/unix/garbage.c | 19 +++++++++++--------
> >  1 file changed, 11 insertions(+), 8 deletions(-)
> > 
> > diff --git a/net/unix/garbage.c b/net/unix/garbage.c
> > index 1f8b8cdfcdc8..7ffb80dd422c 100644
> > --- a/net/unix/garbage.c
> > +++ b/net/unix/garbage.c
> > @@ -405,6 +405,7 @@ static bool unix_scc_cyclic(struct list_head *scc)
> >  
> >  static LIST_HEAD(unix_visited_vertices);
> >  static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
> > +static unsigned long unix_graph_circles;
> >  
> >  static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
> >  			    struct sk_buff_head *hitlist)
> > @@ -494,8 +495,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
> >  
> >  		if (scc_dead)
> >  			unix_collect_skb(&scc, hitlist);
> > -		else if (!unix_graph_maybe_cyclic)
> > -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > +		else if (unix_scc_cyclic(&scc))
> > +			unix_graph_circles++;
> >  
> >  		list_del(&scc);
> >  	}
> > @@ -509,7 +510,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
> >  {
> >  	unsigned long last_index = UNIX_VERTEX_INDEX_START;
> >  
> > -	unix_graph_maybe_cyclic = false;
> > +	unix_graph_circles = 0;
> >  
> >  	/* Visit every vertex exactly once.
> >  	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
> > @@ -524,13 +525,12 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
> >  	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
> >  	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
> >  
> > +	unix_graph_maybe_cyclic = !!unix_graph_circles;
> >  	unix_graph_grouped = true;
> >  }
> >  
> >  static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
> >  {
> > -	unix_graph_maybe_cyclic = false;
> > -
> >  	while (!list_empty(&unix_unvisited_vertices)) {
> >  		struct unix_vertex *vertex;
> >  		struct list_head scc;
> > @@ -546,15 +546,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
> >  				scc_dead = unix_vertex_dead(vertex);
> >  		}
> >  
> > -		if (scc_dead)
> > +		if (scc_dead) {
> >  			unix_collect_skb(&scc, hitlist);
> > -		else if (!unix_graph_maybe_cyclic)
> > -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > +			unix_graph_circles--;
> 
> Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?

Will add in v2.

> 
> I find this patch a little scary - meaning I can't understand it
> fully.
> I'm wondering if it would make any sense to postpone this patch
> to the next cycle?

It's fine by me to postpone patch 2 - 5, but it would be appreciated
if patch 1 makes it to this cycle.

Thanks!
Paolo Abeni May 8, 2024, 10:08 a.m. UTC | #3
On Tue, 2024-05-07 at 09:11 -0700, Kuniyuki Iwashima wrote:
> From: Paolo Abeni <pabeni@redhat.com>
> Date: Tue, 07 May 2024 15:54:33 +0200
> > On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> > > unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> > > can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
> > > 
> > > If we count the number of loops in the graph during Tarjan's algorithm,
> > > we need not call unix_scc_cyclic() in unix_walk_scc_fast().
> > > 
> > > Instead, we can just decrement the number when calling unix_collect_skb()
> > > and update unix_graph_maybe_cyclic based on the count.
> > > 
> > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > ---
> > >  net/unix/garbage.c | 19 +++++++++++--------
> > >  1 file changed, 11 insertions(+), 8 deletions(-)
> > > 
> > > diff --git a/net/unix/garbage.c b/net/unix/garbage.c
> > > index 1f8b8cdfcdc8..7ffb80dd422c 100644
> > > --- a/net/unix/garbage.c
> > > +++ b/net/unix/garbage.c
> > > @@ -405,6 +405,7 @@ static bool unix_scc_cyclic(struct list_head *scc)
> > >  
> > >  static LIST_HEAD(unix_visited_vertices);
> > >  static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
> > > +static unsigned long unix_graph_circles;
> > >  
> > >  static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
> > >  			    struct sk_buff_head *hitlist)
> > > @@ -494,8 +495,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
> > >  
> > >  		if (scc_dead)
> > >  			unix_collect_skb(&scc, hitlist);
> > > -		else if (!unix_graph_maybe_cyclic)
> > > -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > > +		else if (unix_scc_cyclic(&scc))
> > > +			unix_graph_circles++;
> > >  
> > >  		list_del(&scc);
> > >  	}
> > > @@ -509,7 +510,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
> > >  {
> > >  	unsigned long last_index = UNIX_VERTEX_INDEX_START;
> > >  
> > > -	unix_graph_maybe_cyclic = false;
> > > +	unix_graph_circles = 0;
> > >  
> > >  	/* Visit every vertex exactly once.
> > >  	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
> > > @@ -524,13 +525,12 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
> > >  	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
> > >  	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
> > >  
> > > +	unix_graph_maybe_cyclic = !!unix_graph_circles;
> > >  	unix_graph_grouped = true;
> > >  }
> > >  
> > >  static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
> > >  {
> > > -	unix_graph_maybe_cyclic = false;
> > > -
> > >  	while (!list_empty(&unix_unvisited_vertices)) {
> > >  		struct unix_vertex *vertex;
> > >  		struct list_head scc;
> > > @@ -546,15 +546,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
> > >  				scc_dead = unix_vertex_dead(vertex);
> > >  		}
> > >  
> > > -		if (scc_dead)
> > > +		if (scc_dead) {
> > >  			unix_collect_skb(&scc, hitlist);
> > > -		else if (!unix_graph_maybe_cyclic)
> > > -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > > +			unix_graph_circles--;
> > 
> > Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?
> 
> Will add in v2.
> 
> > 
> > I find this patch a little scary - meaning I can't understand it
> > fully.
> > I'm wondering if it would make any sense to postpone this patch
> > to the next cycle?
> 
> It's fine by me to postpone patch 2 - 5, but it would be appreciated
> if patch 1 makes it to this cycle.

Yes, patch 1 looks fine and safe to me. Feel free to re-submit that one
individually right now, with my Acked-by tag.

Thanks!

Paolo
Kuniyuki Iwashima May 8, 2024, 5:05 p.m. UTC | #4
From: Paolo Abeni <pabeni@redhat.com>
Date: Wed, 08 May 2024 12:08:48 +0200
> On Tue, 2024-05-07 at 09:11 -0700, Kuniyuki Iwashima wrote:
> > From: Paolo Abeni <pabeni@redhat.com>
> > Date: Tue, 07 May 2024 15:54:33 +0200
> > > On Fri, 2024-05-03 at 15:31 -0700, Kuniyuki Iwashima wrote:
> > > > unix_walk_scc_fast() calls unix_scc_cyclic() for every SCC so that we
> > > > can make unix_graph_maybe_cyclic false when all SCC are cleaned up.
> > > > 
> > > > If we count the number of loops in the graph during Tarjan's algorithm,
> > > > we need not call unix_scc_cyclic() in unix_walk_scc_fast().
> > > > 
> > > > Instead, we can just decrement the number when calling unix_collect_skb()
> > > > and update unix_graph_maybe_cyclic based on the count.
> > > > 
> > > > Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
> > > > ---
> > > >  net/unix/garbage.c | 19 +++++++++++--------
> > > >  1 file changed, 11 insertions(+), 8 deletions(-)
> > > > 
> > > > diff --git a/net/unix/garbage.c b/net/unix/garbage.c
> > > > index 1f8b8cdfcdc8..7ffb80dd422c 100644
> > > > --- a/net/unix/garbage.c
> > > > +++ b/net/unix/garbage.c
> > > > @@ -405,6 +405,7 @@ static bool unix_scc_cyclic(struct list_head *scc)
> > > >  
> > > >  static LIST_HEAD(unix_visited_vertices);
> > > >  static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
> > > > +static unsigned long unix_graph_circles;
> > > >  
> > > >  static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
> > > >  			    struct sk_buff_head *hitlist)
> > > > @@ -494,8 +495,8 @@ static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
> > > >  
> > > >  		if (scc_dead)
> > > >  			unix_collect_skb(&scc, hitlist);
> > > > -		else if (!unix_graph_maybe_cyclic)
> > > > -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > > > +		else if (unix_scc_cyclic(&scc))
> > > > +			unix_graph_circles++;
> > > >  
> > > >  		list_del(&scc);
> > > >  	}
> > > > @@ -509,7 +510,7 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
> > > >  {
> > > >  	unsigned long last_index = UNIX_VERTEX_INDEX_START;
> > > >  
> > > > -	unix_graph_maybe_cyclic = false;
> > > > +	unix_graph_circles = 0;
> > > >  
> > > >  	/* Visit every vertex exactly once.
> > > >  	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
> > > > @@ -524,13 +525,12 @@ static void unix_walk_scc(struct sk_buff_head *hitlist)
> > > >  	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
> > > >  	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
> > > >  
> > > > +	unix_graph_maybe_cyclic = !!unix_graph_circles;
> > > >  	unix_graph_grouped = true;
> > > >  }
> > > >  
> > > >  static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
> > > >  {
> > > > -	unix_graph_maybe_cyclic = false;
> > > > -
> > > >  	while (!list_empty(&unix_unvisited_vertices)) {
> > > >  		struct unix_vertex *vertex;
> > > >  		struct list_head scc;
> > > > @@ -546,15 +546,18 @@ static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
> > > >  				scc_dead = unix_vertex_dead(vertex);
> > > >  		}
> > > >  
> > > > -		if (scc_dead)
> > > > +		if (scc_dead) {
> > > >  			unix_collect_skb(&scc, hitlist);
> > > > -		else if (!unix_graph_maybe_cyclic)
> > > > -			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
> > > > +			unix_graph_circles--;
> > > 
> > > Possibly WARN_ON_ONCE(unix_graph_circles < 0) ?
> > 
> > Will add in v2.
> > 
> > > 
> > > I find this patch a little scary - meaning I can't understand it
> > > fully.
> > > I'm wondering if it would make any sense to postpone this patch
> > > to the next cycle?
> > 
> > It's fine by me to postpone patch 2 - 5, but it would be appreciated
> > if patch 1 makes it to this cycle.
> 
> Yes, patch 1 looks fine and safe to me. Feel free to re-submit that one
> individually right now, with my Acked-by tag.

Thanks, will do!
diff mbox series

Patch

diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 1f8b8cdfcdc8..7ffb80dd422c 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -405,6 +405,7 @@  static bool unix_scc_cyclic(struct list_head *scc)
 
 static LIST_HEAD(unix_visited_vertices);
 static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
+static unsigned long unix_graph_circles;
 
 static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
 			    struct sk_buff_head *hitlist)
@@ -494,8 +495,8 @@  static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_inde
 
 		if (scc_dead)
 			unix_collect_skb(&scc, hitlist);
-		else if (!unix_graph_maybe_cyclic)
-			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+		else if (unix_scc_cyclic(&scc))
+			unix_graph_circles++;
 
 		list_del(&scc);
 	}
@@ -509,7 +510,7 @@  static void unix_walk_scc(struct sk_buff_head *hitlist)
 {
 	unsigned long last_index = UNIX_VERTEX_INDEX_START;
 
-	unix_graph_maybe_cyclic = false;
+	unix_graph_circles = 0;
 
 	/* Visit every vertex exactly once.
 	 * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
@@ -524,13 +525,12 @@  static void unix_walk_scc(struct sk_buff_head *hitlist)
 	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
 	swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
 
+	unix_graph_maybe_cyclic = !!unix_graph_circles;
 	unix_graph_grouped = true;
 }
 
 static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
 {
-	unix_graph_maybe_cyclic = false;
-
 	while (!list_empty(&unix_unvisited_vertices)) {
 		struct unix_vertex *vertex;
 		struct list_head scc;
@@ -546,15 +546,18 @@  static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
 				scc_dead = unix_vertex_dead(vertex);
 		}
 
-		if (scc_dead)
+		if (scc_dead) {
 			unix_collect_skb(&scc, hitlist);
-		else if (!unix_graph_maybe_cyclic)
-			unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
+			unix_graph_circles--;
+		}
 
 		list_del(&scc);
 	}
 
 	list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
+
+	if (!unix_graph_circles)
+		unix_graph_maybe_cyclic = false;
 }
 
 static bool gc_in_progress;