[RFC,v1,1/5] mm, slub: fix bulk alloc and free stats

Message ID 20230808095342.12637-8-vbabka@suse.cz (mailing list archive)
State New
Series SLUB percpu array caches and maple tree nodes

Commit Message

Vlastimil Babka Aug. 8, 2023, 9:53 a.m. UTC
The SLUB sysfs stats enabled by CONFIG_SLUB_STATS have two deficiencies
identified with respect to bulk alloc/free operations:

- Bulk allocations from the cpu freelist are not counted. Add the
  ALLOC_FASTPATH counter there.

- Bulk fastpath freeing counts a list of multiple objects with a
  single FREE_FASTPATH increment. Add a stat_add() variant to count
  them all.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
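
For context, when CONFIG_SLUB_STATS is enabled each counter lives in the
per-cpu stat[] array of the cache and is summed across CPUs when read via
sysfs. A minimal sketch of that aggregation pattern (illustrative only;
the real reader is show_stat() in mm/slub.c, and sum_stat() here is a
hypothetical name):

static unsigned long sum_stat(struct kmem_cache *s, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;

	/* Each CPU keeps its own counter; a sysfs read sums them all. */
	for_each_online_cpu(cpu)
		sum += per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

	return sum;
}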

Comments

Hyeonggon Yoo Aug. 18, 2023, 11:47 a.m. UTC | #1
On Tue, Aug 8, 2023 at 6:53 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> The SLUB sysfs stats enabled by CONFIG_SLUB_STATS have two deficiencies
> identified with respect to bulk alloc/free operations:
>
> - Bulk allocations from the cpu freelist are not counted. Add the
>   ALLOC_FASTPATH counter there.
>
> - Bulk fastpath freeing counts a list of multiple objects with a
>   single FREE_FASTPATH increment. Add a stat_add() variant to count
>   them all.
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 11 ++++++++++-
>  1 file changed, 10 insertions(+), 1 deletion(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index e3b5d5c0eb3a..a9437d48840c 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -341,6 +341,14 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
>  #endif
>  }
>
> +static inline void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
> +{
> +#ifdef CONFIG_SLUB_STATS
> +       raw_cpu_add(s->cpu_slab->stat[si], v);
> +#endif
> +}
> +
> +
>  /*
>   * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
>   * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
> @@ -3776,7 +3784,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
>
>                 local_unlock(&s->cpu_slab->lock);
>         }
> -       stat(s, FREE_FASTPATH);
> +       stat_add(s, FREE_FASTPATH, cnt);

Should the bulk free slowpath also be counted in the same way? (A
hypothetical sketch follows this reply.)

>  }
>  #else /* CONFIG_SLUB_TINY */
>  static void do_slab_free(struct kmem_cache *s,
> @@ -3978,6 +3986,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
>                 c->freelist = get_freepointer(s, object);
>                 p[i] = object;
>                 maybe_wipe_obj_freeptr(s, p[i]);
> +               stat(s, ALLOC_FASTPATH);
>         }
>         c->tid = next_tid(c->tid);
>         local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
> --
> 2.41.0
>
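
For illustration, the slowpath counting suggested above might look like the
hypothetical hunk below. It assumes __slab_free() keeps receiving the freed
object count in its cnt parameter; this is a sketch, not part of the series:

@@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
-	stat(s, FREE_SLOWPATH);
+	stat_add(s, FREE_SLOWPATH, cnt);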

Patch

diff --git a/mm/slub.c b/mm/slub.c
index e3b5d5c0eb3a..a9437d48840c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -341,6 +341,14 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
 #endif
 }
 
+static inline void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
+{
+#ifdef CONFIG_SLUB_STATS
+	raw_cpu_add(s->cpu_slab->stat[si], v);
+#endif
+}
+
+
 /*
  * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
  * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
@@ -3776,7 +3784,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 
 		local_unlock(&s->cpu_slab->lock);
 	}
-	stat(s, FREE_FASTPATH);
+	stat_add(s, FREE_FASTPATH, cnt);
 }
 #else /* CONFIG_SLUB_TINY */
 static void do_slab_free(struct kmem_cache *s,
@@ -3978,6 +3986,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
 		maybe_wipe_obj_freeptr(s, p[i]);
+		stat(s, ALLOC_FASTPATH);
 	}
 	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
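
A note on the accounting primitive: stat_add() uses raw_cpu_add() for the
same reason stat() uses raw_cpu_inc(): the read-modify-write is racy on a
preemptible kernel, but a rarely lost update is acceptable for statistics
and avoids this_cpu_add()'s protection overhead. The same pattern outside
SLUB, as a sketch (demo_stat and demo_add() are hypothetical names):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_stat);

static inline void demo_add(int v)
{
	/* Unprotected per-cpu RMW: cheap, and good enough for stats. */
	raw_cpu_add(demo_stat, v);
}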