[v5,8/9] slub: Rename all *unfreeze_partials* functions to *put_partials*

Message ID 20231102032330.1036151-9-chengming.zhou@linux.dev (mailing list archive)
State New
Series slub: Delay freezing of CPU partial slabs

Commit Message

Chengming Zhou Nov. 2, 2023, 3:23 a.m. UTC
From: Chengming Zhou <zhouchengming@bytedance.com>

Since all slabs on the CPU partial list are not frozen anymore, we no
longer unfreeze anything when moving them to the node partial list, so
it's better to rename these functions accordingly.
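
To illustrate the difference, here is a rough sketch (simplified
pseudocode, not the actual mm/slub.c code; helper names are
approximate): the old path had to clear the frozen bit with a cmpxchg
loop before a slab could go back to a node partial list, while the
renamed path only links the slab into the list under n->list_lock:

	/* old __unfreeze_partials(): cpu partial slabs were frozen */
	do {
		old.freelist = slab->freelist;
		old.counters = slab->counters;
		new.freelist = old.freelist;
		new.counters = old.counters;
		new.frozen = 0;		/* the "unfreeze" step */
	} while (!slab_update_freelist(s, slab, old.freelist, old.counters,
				       new.freelist, new.counters,
				       "unfreezing slab"));
	add_partial(n, slab, DEACTIVATE_TO_TAIL);

	/* new __put_partials(): nothing is frozen, so just put the slab
	 * on the node partial list (or discard it if the node already
	 * holds enough partial slabs) */
	add_partial(n, slab, DEACTIVATE_TO_TAIL);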

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slub.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

Comments

Hyeonggon Yoo Dec. 3, 2023, 9:27 a.m. UTC | #1
On Thu, Nov 2, 2023 at 12:25 PM <chengming.zhou@linux.dev> wrote:
>
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> Since all slabs on the CPU partial list are not frozen anymore, we no
> longer unfreeze anything when moving them to the node partial list, so
> it's better to rename these functions accordingly.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> ---
>  mm/slub.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index d137468fe4b9..c20bdf5dab0f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2546,7 +2546,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  }
>
>  #ifdef CONFIG_SLUB_CPU_PARTIAL
> -static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
> +static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
>  {
>         struct kmem_cache_node *n = NULL, *n2 = NULL;
>         struct slab *slab, *slab_to_discard = NULL;
> @@ -2588,9 +2588,9 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
>  }
>
>  /*
> - * Unfreeze all the cpu partial slabs.
> + * Put all the cpu partial slabs to the node partial list.
>   */
> -static void unfreeze_partials(struct kmem_cache *s)
> +static void put_partials(struct kmem_cache *s)
>  {
>         struct slab *partial_slab;
>         unsigned long flags;
> @@ -2601,11 +2601,11 @@ static void unfreeze_partials(struct kmem_cache *s)
>         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
>
>         if (partial_slab)
> -               __unfreeze_partials(s, partial_slab);
> +               __put_partials(s, partial_slab);
>  }
>
> -static void unfreeze_partials_cpu(struct kmem_cache *s,
> -                                 struct kmem_cache_cpu *c)
> +static void put_partials_cpu(struct kmem_cache *s,
> +                            struct kmem_cache_cpu *c)
>  {
>         struct slab *partial_slab;
>
> @@ -2613,7 +2613,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
>         c->partial = NULL;
>
>         if (partial_slab)
> -               __unfreeze_partials(s, partial_slab);
> +               __put_partials(s, partial_slab);
>  }
>
>  /*
> @@ -2626,7 +2626,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
>  static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
>  {
>         struct slab *oldslab;
> -       struct slab *slab_to_unfreeze = NULL;
> +       struct slab *slab_to_put = NULL;
>         unsigned long flags;
>         int slabs = 0;
>
> @@ -2641,7 +2641,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
>                          * per node partial list. Postpone the actual unfreezing
>                          * outside of the critical section.
>                          */
> -                       slab_to_unfreeze = oldslab;
> +                       slab_to_put = oldslab;
>                         oldslab = NULL;
>                 } else {
>                         slabs = oldslab->slabs;
> @@ -2657,17 +2657,17 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
>
>         local_unlock_irqrestore(&s->cpu_slab->lock, flags);
>
> -       if (slab_to_unfreeze) {
> -               __unfreeze_partials(s, slab_to_unfreeze);
> +       if (slab_to_put) {
> +               __put_partials(s, slab_to_put);
>                 stat(s, CPU_PARTIAL_DRAIN);
>         }
>  }
>
>  #else  /* CONFIG_SLUB_CPU_PARTIAL */
>
> -static inline void unfreeze_partials(struct kmem_cache *s) { }
> -static inline void unfreeze_partials_cpu(struct kmem_cache *s,
> -                                 struct kmem_cache_cpu *c) { }
> +static inline void put_partials(struct kmem_cache *s) { }
> +static inline void put_partials_cpu(struct kmem_cache *s,
> +                                   struct kmem_cache_cpu *c) { }
>
>  #endif /* CONFIG_SLUB_CPU_PARTIAL */
>
> @@ -2709,7 +2709,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
>                 stat(s, CPUSLAB_FLUSH);
>         }
>
> -       unfreeze_partials_cpu(s, c);
> +       put_partials_cpu(s, c);
>  }
>
>  struct slub_flush_work {
> @@ -2737,7 +2737,7 @@ static void flush_cpu_slab(struct work_struct *w)
>         if (c->slab)
>                 flush_slab(s, c);
>
> -       unfreeze_partials(s);
> +       put_partials(s);
>  }
>
>  static bool has_cpu_slab(int cpu, struct kmem_cache *s)
> @@ -3168,7 +3168,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
>                 if (unlikely(!node_match(slab, node) ||
>                              !pfmemalloc_match(slab, gfpflags))) {
>                         slab->next = NULL;
> -                       __unfreeze_partials(s, slab);
> +                       __put_partials(s, slab);
>                         continue;
>                 }
>
> --

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Thanks!

> 2.20.1
>

Patch

diff --git a/mm/slub.c b/mm/slub.c
index d137468fe4b9..c20bdf5dab0f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2546,7 +2546,7 @@  static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 }
 
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
+static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
 {
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct slab *slab, *slab_to_discard = NULL;
@@ -2588,9 +2588,9 @@  static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
 }
 
 /*
- * Unfreeze all the cpu partial slabs.
+ * Put all the cpu partial slabs to the node partial list.
  */
-static void unfreeze_partials(struct kmem_cache *s)
+static void put_partials(struct kmem_cache *s)
 {
 	struct slab *partial_slab;
 	unsigned long flags;
@@ -2601,11 +2601,11 @@  static void unfreeze_partials(struct kmem_cache *s)
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
 	if (partial_slab)
-		__unfreeze_partials(s, partial_slab);
+		__put_partials(s, partial_slab);
 }
 
-static void unfreeze_partials_cpu(struct kmem_cache *s,
-				  struct kmem_cache_cpu *c)
+static void put_partials_cpu(struct kmem_cache *s,
+			     struct kmem_cache_cpu *c)
 {
 	struct slab *partial_slab;
 
@@ -2613,7 +2613,7 @@  static void unfreeze_partials_cpu(struct kmem_cache *s,
 	c->partial = NULL;
 
 	if (partial_slab)
-		__unfreeze_partials(s, partial_slab);
+		__put_partials(s, partial_slab);
 }
 
 /*
@@ -2626,7 +2626,7 @@  static void unfreeze_partials_cpu(struct kmem_cache *s,
 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
 {
 	struct slab *oldslab;
-	struct slab *slab_to_unfreeze = NULL;
+	struct slab *slab_to_put = NULL;
 	unsigned long flags;
 	int slabs = 0;
 
@@ -2641,7 +2641,7 @@  static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
 			 * per node partial list. Postpone the actual unfreezing
 			 * outside of the critical section.
 			 */
-			slab_to_unfreeze = oldslab;
+			slab_to_put = oldslab;
 			oldslab = NULL;
 		} else {
 			slabs = oldslab->slabs;
@@ -2657,17 +2657,17 @@  static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
 
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
-	if (slab_to_unfreeze) {
-		__unfreeze_partials(s, slab_to_unfreeze);
+	if (slab_to_put) {
+		__put_partials(s, slab_to_put);
 		stat(s, CPU_PARTIAL_DRAIN);
 	}
 }
 
 #else	/* CONFIG_SLUB_CPU_PARTIAL */
 
-static inline void unfreeze_partials(struct kmem_cache *s) { }
-static inline void unfreeze_partials_cpu(struct kmem_cache *s,
-				  struct kmem_cache_cpu *c) { }
+static inline void put_partials(struct kmem_cache *s) { }
+static inline void put_partials_cpu(struct kmem_cache *s,
+				    struct kmem_cache_cpu *c) { }
 
 #endif	/* CONFIG_SLUB_CPU_PARTIAL */
 
@@ -2709,7 +2709,7 @@  static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 		stat(s, CPUSLAB_FLUSH);
 	}
 
-	unfreeze_partials_cpu(s, c);
+	put_partials_cpu(s, c);
 }
 
 struct slub_flush_work {
@@ -2737,7 +2737,7 @@  static void flush_cpu_slab(struct work_struct *w)
 	if (c->slab)
 		flush_slab(s, c);
 
-	unfreeze_partials(s);
+	put_partials(s);
 }
 
 static bool has_cpu_slab(int cpu, struct kmem_cache *s)
@@ -3168,7 +3168,7 @@  static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		if (unlikely(!node_match(slab, node) ||
 			     !pfmemalloc_match(slab, gfpflags))) {
 			slab->next = NULL;
-			__unfreeze_partials(s, slab);
+			__put_partials(s, slab);
 			continue;
 		}