[09/12] mm, slub: split out allocations from pre/post hooks

Message ID 20221121171202.22080-10-vbabka@suse.cz (mailing list archive)
State New
Series Introduce CONFIG_SLUB_TINY and deprecate SLOB

Commit Message

Vlastimil Babka Nov. 21, 2022, 5:11 p.m. UTC
In the following patch we want to introduce CONFIG_SLUB_TINY allocation
paths that don't use the percpu slab. To prepare, refactor the
allocation functions:

Split out __slab_alloc_node() from slab_alloc_node() where the former
does the actual allocation and the latter calls the pre/post hooks.

Analogously, split out __kmem_cache_alloc_bulk() from
kmem_cache_alloc_bulk().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 127 +++++++++++++++++++++++++++++++++---------------------
 1 file changed, 77 insertions(+), 50 deletions(-)
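
For orientation, here is a condensed sketch of the shape this refactoring
gives slab_alloc_node(); all names come from the patch itself, but the body
is simplified (kfence_alloc(), the freelist-pointer wipe and the NUMA/debug
details are elided), so it is an illustration rather than the exact kernel
code (see the full patch at the bottom):

static __always_inline void *slab_alloc_node(struct kmem_cache *s,
		struct list_lru *lru, gfp_t gfpflags, int node,
		unsigned long addr, size_t orig_size)
{
	struct obj_cgroup *objcg = NULL;
	void *object;

	/* pre hook: memcg charging and kmem_cache debug support */
	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
	if (!s)
		return NULL;

	/* the actual percpu-slab allocation, now split out */
	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);

	/* post hook: debug/memcg accounting and optional zeroing */
	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
			     slab_want_init_on_alloc(gfpflags, s));

	return object;
}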

Comments

Hyeonggon Yoo Nov. 27, 2022, 10:54 a.m. UTC | #1
On Mon, Nov 21, 2022 at 06:11:59PM +0100, Vlastimil Babka wrote:
> In the following patch we want to introduce CONFIG_SLUB_TINY allocation
> paths that don't use the percpu slab. To prepare, refactor the
> allocation functions:
> 
> Split out __slab_alloc_node() from slab_alloc_node() where the former
> does the actual allocation and the latter calls the pre/post hooks.
> 
> Analogously, split out __kmem_cache_alloc_bulk() from
> kmem_cache_alloc_bulk().
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 127 +++++++++++++++++++++++++++++++++---------------------
>  1 file changed, 77 insertions(+), 50 deletions(-)

[...]

> +
> +/* Note that interrupts must be enabled when calling this function. */
> +int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
> +			  void **p)
> +{
> +	int i;
> +	struct obj_cgroup *objcg = NULL;
> +
> +	/* memcg and kmem_cache debug support */
> +	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
> +	if (unlikely(!s))
> +		return false;
> +
> +	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
> +
> +	/*
> +	 * memcg and kmem_cache debug support and memory initialization.
> +	 * Done outside of the IRQ disabled fastpath loop.
> +	 */
> +	if (i != 0)
> +		slab_post_alloc_hook(s, objcg, flags, size, p,
> +				slab_want_init_on_alloc(flags, s));

This patch looks mostly good, but I'm wondering what happens if someone calls it with size == 0,
so that slab_post_alloc_hook() is never called?

> +	return i;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_bulk);
>  
> -- 
> 2.38.1
>
Vlastimil Babka Nov. 27, 2022, 11:01 p.m. UTC | #2
On 11/27/22 11:54, Hyeonggon Yoo wrote:
> On Mon, Nov 21, 2022 at 06:11:59PM +0100, Vlastimil Babka wrote:
>> In the following patch we want to introduce CONFIG_SLUB_TINY allocation
>> paths that don't use the percpu slab. To prepare, refactor the
>> allocation functions:
>> 
>> Split out __slab_alloc_node() from slab_alloc_node() where the former
>> does the actual allocation and the latter calls the pre/post hooks.
>> 
>> Analogously, split out __kmem_cache_alloc_bulk() from
>> kmem_cache_alloc_bulk().
>> 
>> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>> ---
>>  mm/slub.c | 127 +++++++++++++++++++++++++++++++++---------------------
>>  1 file changed, 77 insertions(+), 50 deletions(-)
> 
> [...]
> 
>> +
>> +/* Note that interrupts must be enabled when calling this function. */
>> +int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>> +			  void **p)
>> +{
>> +	int i;
>> +	struct obj_cgroup *objcg = NULL;
>> +
>> +	/* memcg and kmem_cache debug support */
>> +	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
>> +	if (unlikely(!s))
>> +		return false;
>> +
>> +	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
>> +
>> +	/*
>> +	 * memcg and kmem_cache debug support and memory initialization.
>> +	 * Done outside of the IRQ disabled fastpath loop.
>> +	 */
>> +	if (i != 0)
>> +		slab_post_alloc_hook(s, objcg, flags, size, p,
>> +				slab_want_init_on_alloc(flags, s));
> 
> This patch looks mostly good, but I'm wondering what happens if someone calls it with size == 0,
> so that slab_post_alloc_hook() is never called?

Hmm, it looks like in that case we miss doing obj_cgroup_put(objcg) in
memcg_slab_post_alloc_hook(). Good catch, thanks!

Fixing up, and also fixing a "return false" from an int function
(this existed before this patch as well).

----8<----
diff --git a/mm/slub.c b/mm/slub.c
index b74c4664160e..88f0ce49caab 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3888,10 +3888,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	int i;
 	struct obj_cgroup *objcg = NULL;
 
+	if (!size)
+		return 0;
+
 	/* memcg and kmem_cache debug support */
 	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (unlikely(!s))
-		return false;
+		return 0;
 
 	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
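
To make the failure mode concrete, here is a minimal, hypothetical
illustration of the case the early return covers (demo_zero_sized_bulk() is
made up for this sketch; the objcg reference counting itself happens inside
the memcg slab hooks):

#include <linux/slab.h>

static int demo_zero_sized_bulk(struct kmem_cache *cache)
{
	void *objs[1];
	int n;

	/* size == 0: nothing is allocated and n ends up 0 */
	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, 0, objs);

	/*
	 * Without the fixup above, slab_pre_alloc_hook() has already taken
	 * an obj_cgroup reference at this point, but the "if (i != 0)"
	 * check skips slab_post_alloc_hook(), so obj_cgroup_put() is never
	 * reached. The early "if (!size) return 0;" bypasses both hooks.
	 */
	return n;
}
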
Hyeonggon Yoo Nov. 28, 2022, 1:06 p.m. UTC | #3
On Mon, Nov 28, 2022 at 12:01:39AM +0100, Vlastimil Babka wrote:
> On 11/27/22 11:54, Hyeonggon Yoo wrote:
> > On Mon, Nov 21, 2022 at 06:11:59PM +0100, Vlastimil Babka wrote:
> >> In the following patch we want to introduce CONFIG_SLUB_TINY allocation
> >> paths that don't use the percpu slab. To prepare, refactor the
> >> allocation functions:
> >> 
> >> Split out __slab_alloc_node() from slab_alloc_node() where the former
> >> does the actual allocation and the latter calls the pre/post hooks.
> >> 
> >> Analogously, split out __kmem_cache_alloc_bulk() from
> >> kmem_cache_alloc_bulk().
> >> 
> >> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> >> ---
> >>  mm/slub.c | 127 +++++++++++++++++++++++++++++++++---------------------
> >>  1 file changed, 77 insertions(+), 50 deletions(-)
> > 
> > [...]
> > 
> >> +
> >> +/* Note that interrupts must be enabled when calling this function. */
> >> +int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
> >> +			  void **p)
> >> +{
> >> +	int i;
> >> +	struct obj_cgroup *objcg = NULL;
> >> +
> >> +	/* memcg and kmem_cache debug support */
> >> +	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
> >> +	if (unlikely(!s))
> >> +		return false;
> >> +
> >> +	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
> >> +
> >> +	/*
> >> +	 * memcg and kmem_cache debug support and memory initialization.
> >> +	 * Done outside of the IRQ disabled fastpath loop.
> >> +	 */
> >> +	if (i != 0)
> >> +		slab_post_alloc_hook(s, objcg, flags, size, p,
> >> +				slab_want_init_on_alloc(flags, s));
> > 
> > This patch looks mostly good, but I'm wondering what happens if someone calls it with size == 0,
> > so that slab_post_alloc_hook() is never called?
> 
> Hmm, it looks like in that case we miss doing obj_cgroup_put(objcg) in
> memcg_slab_post_alloc_hook(). Good catch, thanks!
> 
> Fixing up, and also fixing a "return false" from an int function
> (this existed before this patch as well).
> 
> ----8<----
> diff --git a/mm/slub.c b/mm/slub.c
> index b74c4664160e..88f0ce49caab 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3888,10 +3888,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>  	int i;
>  	struct obj_cgroup *objcg = NULL;
>  
> +	if (!size)
> +		return 0;
> +
>  	/* memcg and kmem_cache debug support */
>  	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
>  	if (unlikely(!s))
> -		return false;
> +		return 0;
>  
>  	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);

With that,

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Thanks!

Patch

diff --git a/mm/slub.c b/mm/slub.c
index fd56d7cca9c2..5677db3f6d15 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2907,10 +2907,10 @@  static unsigned long count_partial(struct kmem_cache_node *n,
 }
 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
 
+#ifdef CONFIG_SLUB_DEBUG
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
-#ifdef CONFIG_SLUB_DEBUG
 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 	int node;
@@ -2941,8 +2941,11 @@  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
-#endif
 }
+#else /* CONFIG_SLUB_DEBUG */
+static inline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
+#endif
 
 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 {
@@ -3239,45 +3242,13 @@  static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return p;
 }
 
-/*
- * If the object has been wiped upon free, make sure it's fully initialized by
- * zeroing out freelist pointer.
- */
-static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
-						   void *obj)
-{
-	if (unlikely(slab_want_init_on_free(s)) && obj)
-		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
-			0, sizeof(void *));
-}
-
-/*
- * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
- * have the fastpath folded into their functions. So no function call
- * overhead for requests that can be satisfied on the fastpath.
- *
- * The fastpath works by first checking if the lockless freelist can be used.
- * If not then __slab_alloc is called for slow processing.
- *
- * Otherwise we can simply pick the next object from the lockless free list.
- */
-static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
-	void *object;
 	struct kmem_cache_cpu *c;
 	struct slab *slab;
 	unsigned long tid;
-	struct obj_cgroup *objcg = NULL;
-	bool init = false;
-
-	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
-	if (!s)
-		return NULL;
-
-	object = kfence_alloc(s, orig_size, gfpflags);
-	if (unlikely(object))
-		goto out;
+	void *object;
 
 redo:
 	/*
@@ -3347,6 +3318,48 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 		stat(s, ALLOC_FASTPATH);
 	}
 
+	return object;
+}
+
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+						   void *obj)
+{
+	if (unlikely(slab_want_init_on_free(s)) && obj)
+		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+			0, sizeof(void *));
+}
+
+/*
+ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
+ * have the fastpath folded into their functions. So no function call
+ * overhead for requests that can be satisfied on the fastpath.
+ *
+ * The fastpath works by first checking if the lockless freelist can be used.
+ * If not then __slab_alloc is called for slow processing.
+ *
+ * Otherwise we can simply pick the next object from the lockless free list.
+ */
+static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
+{
+	void *object;
+	struct obj_cgroup *objcg = NULL;
+	bool init = false;
+
+	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
+	if (!s)
+		return NULL;
+
+	object = kfence_alloc(s, orig_size, gfpflags);
+	if (unlikely(object))
+		goto out;
+
+	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
+
 	maybe_wipe_obj_freeptr(s, object);
 	init = slab_want_init_on_alloc(gfpflags, s);
 
@@ -3799,18 +3812,12 @@  void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-/* Note that interrupts must be enabled when calling this function. */
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			  void **p)
+static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
+			size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
 	int i;
-	struct obj_cgroup *objcg = NULL;
 
-	/* memcg and kmem_cache debug support */
-	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
-	if (unlikely(!s))
-		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
@@ -3864,18 +3871,38 @@  int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	local_unlock_irq(&s->cpu_slab->lock);
 	slub_put_cpu_ptr(s->cpu_slab);
 
-	/*
-	 * memcg and kmem_cache debug support and memory initialization.
-	 * Done outside of the IRQ disabled fastpath loop.
-	 */
-	slab_post_alloc_hook(s, objcg, flags, size, p,
-				slab_want_init_on_alloc(flags, s));
 	return i;
+
 error:
 	slub_put_cpu_ptr(s->cpu_slab);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	kmem_cache_free_bulk(s, i, p);
 	return 0;
+
+}
+
+/* Note that interrupts must be enabled when calling this function. */
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+			  void **p)
+{
+	int i;
+	struct obj_cgroup *objcg = NULL;
+
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
+	if (unlikely(!s))
+		return false;
+
+	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
+
+	/*
+	 * memcg and kmem_cache debug support and memory initialization.
+	 * Done outside of the IRQ disabled fastpath loop.
+	 */
+	if (i != 0)
+		slab_post_alloc_hook(s, objcg, flags, size, p,
+				slab_want_init_on_alloc(flags, s));
+	return i;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
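
For completeness, a hypothetical usage sketch of the bulk API whose
internals this patch reorganizes (my_cache, NR_OBJS and demo_bulk_alloc()
are illustrative names, not part of the patch):

#include <linux/errno.h>
#include <linux/slab.h>

#define NR_OBJS 16

static int demo_bulk_alloc(struct kmem_cache *my_cache)
{
	void *objs[NR_OBJS];

	/*
	 * On success all NR_OBJS slots are filled and NR_OBJS is returned;
	 * on failure the SLUB error path above frees any partial
	 * allocation and returns 0.
	 */
	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, NR_OBJS, objs))
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(my_cache, NR_OBJS, objs);
	return 0;
}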