[v5,1/9] slub: Reflow ___slab_alloc()

Message ID 20231102032330.1036151-2-chengming.zhou@linux.dev (mailing list archive)
State New
Series slub: Delay freezing of CPU partial slabs

Commit Message

Chengming Zhou Nov. 2, 2023, 3:23 a.m. UTC
From: Chengming Zhou <zhouchengming@bytedance.com>

The get_partial() interface used in ___slab_alloc() may return a single
object in the "kmem_cache_debug(s)" case, in which case we just return
the "freelist" object.

Move this handling up to prepare for later changes.

And the "pfmemalloc_match()" part is not needed for node partial slab,
since we already check this in the get_partial_node().
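
For reference, a simplified sketch (not the verbatim upstream code; list
locking and the actual object-acquisition logic are omitted) of how
get_partial_node() already filters candidate slabs with pfmemalloc_match()
while scanning the node partial list:

/*
 * Simplified sketch of get_partial_node()'s scan of the node partial
 * list. Slabs whose pfmemalloc state does not match the request are
 * skipped here, which is why ___slab_alloc() need not re-check
 * pfmemalloc_match() for objects coming from a node partial slab.
 */
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
			      struct partial_context *pc)
{
	struct slab *slab, *slab2;
	void *object = NULL;

	/* n->list_lock is held around this scan in the real code. */
	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
		if (!pfmemalloc_match(slab, pc->flags))
			continue;

		/* ... try to acquire object(s) from this slab ... */
	}

	return object;
}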

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slub.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

Comments

Hyeonggon Yoo Nov. 22, 2023, 12:26 a.m. UTC | #1
On Thu, Nov 2, 2023 at 12:24 PM <chengming.zhou@linux.dev> wrote:
>
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> The get_partial() interface used in ___slab_alloc() may return a single
> object in the "kmem_cache_debug(s)" case, in which case we just return
> the "freelist" object.
>
> Move this handling up to prepare for later changes.
>
> Also, the "pfmemalloc_match()" check is not needed for node partial slabs,
> since get_partial_node() already performs it.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
>  mm/slub.c | 31 +++++++++++++++----------------
>  1 file changed, 15 insertions(+), 16 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 63d281dfacdb..0b0fdc8c189f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3216,8 +3216,21 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
>         pc.slab = &slab;
>         pc.orig_size = orig_size;
>         freelist = get_partial(s, node, &pc);
> -       if (freelist)
> -               goto check_new_slab;
> +       if (freelist) {
> +               if (kmem_cache_debug(s)) {
> +                       /*
> +                        * For debug caches here we had to go through
> +                        * alloc_single_from_partial() so just store the
> +                        * tracking info and return the object.
> +                        */
> +                       if (s->flags & SLAB_STORE_USER)
> +                               set_track(s, freelist, TRACK_ALLOC, addr);
> +
> +                       return freelist;
> +               }
> +
> +               goto retry_load_slab;
> +       }
>
>         slub_put_cpu_ptr(s->cpu_slab);
>         slab = new_slab(s, gfpflags, node);
> @@ -3253,20 +3266,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
>
>         inc_slabs_node(s, slab_nid(slab), slab->objects);
>
> -check_new_slab:
> -
> -       if (kmem_cache_debug(s)) {
> -               /*
> -                * For debug caches here we had to go through
> -                * alloc_single_from_partial() so just store the tracking info
> -                * and return the object
> -                */
> -               if (s->flags & SLAB_STORE_USER)
> -                       set_track(s, freelist, TRACK_ALLOC, addr);
> -
> -               return freelist;
> -       }
> -
>         if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
>                 /*
>                  * For !pfmemalloc_match() case we don't load freelist so that

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

> --
> 2.20.1
>

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..0b0fdc8c189f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3216,8 +3216,21 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	pc.slab = &slab;
 	pc.orig_size = orig_size;
 	freelist = get_partial(s, node, &pc);
-	if (freelist)
-		goto check_new_slab;
+	if (freelist) {
+		if (kmem_cache_debug(s)) {
+			/*
+			 * For debug caches here we had to go through
+			 * alloc_single_from_partial() so just store the
+			 * tracking info and return the object.
+			 */
+			if (s->flags & SLAB_STORE_USER)
+				set_track(s, freelist, TRACK_ALLOC, addr);
+
+			return freelist;
+		}
+
+		goto retry_load_slab;
+	}
 
 	slub_put_cpu_ptr(s->cpu_slab);
 	slab = new_slab(s, gfpflags, node);
@@ -3253,20 +3266,6 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	inc_slabs_node(s, slab_nid(slab), slab->objects);
 
-check_new_slab:
-
-	if (kmem_cache_debug(s)) {
-		/*
-		 * For debug caches here we had to go through
-		 * alloc_single_from_partial() so just store the tracking info
-		 * and return the object
-		 */
-		if (s->flags & SLAB_STORE_USER)
-			set_track(s, freelist, TRACK_ALLOC, addr);
-
-		return freelist;
-	}
-
 	if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
 		/*
 		 * For !pfmemalloc_match() case we don't load freelist so that
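
For readers skimming the hunks, a condensed sketch (not the exact upstream
code; the new_slab() error handling and later labels are omitted) of the
slow-path flow in ___slab_alloc() after this reflow:

	/* Condensed view of the reflowed slow path. */
	freelist = get_partial(s, node, &pc);
	if (freelist) {
		if (kmem_cache_debug(s)) {
			/*
			 * Debug caches came via alloc_single_from_partial():
			 * store the tracking info and return the object.
			 */
			if (s->flags & SLAB_STORE_USER)
				set_track(s, freelist, TRACK_ALLOC, addr);
			return freelist;
		}
		/*
		 * Node partial slabs already passed pfmemalloc_match() in
		 * get_partial_node(), so load the slab without re-checking.
		 */
		goto retry_load_slab;
	}

	/*
	 * No partial slab was found: allocate a new slab. Only this
	 * new-slab path still needs the pfmemalloc_match() check.
	 */
	slub_put_cpu_ptr(s->cpu_slab);
	slab = new_slab(s, gfpflags, node);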