
[RFC,4/4] slab: don't batch kvfree_rcu() with SLUB_TINY

Message ID 20250123-slub-tiny-kfree_rcu-v1-4-0e386ef1541a@suse.cz
State New
Series slab, rcu: move and consolidate TINY_RCU kvfree_rcu() to SLAB

Commit Message

Vlastimil Babka Jan. 23, 2025, 10:37 a.m. UTC
kvfree_rcu() is batched for better performance except on TINY_RCU, which
is a simple implementation for small UP systems. Similarly, SLUB_TINY is
an option intended for small systems, whether or not used together with
TINY_RCU. When SLUB_TINY is used with !TINY_RCU, it arguably makes sense
to skip the batching and limit the memory footprint. It's also
suboptimal to have RCU-specific #ifdefs in slab code.

With that, add CONFIG_KFREE_RCU_BATCHED to determine whether the batched
kvfree_rcu() implementation is used. It has no user prompt; it is
enabled by default and disabled when TINY_RCU or SLUB_TINY is enabled.

Use the new config option for the #ifdefs in slab code and extend their
scope to cover all code used by the batched kvfree_rcu() implementation.
For example, there's no need to perform kvfree_rcu_init() when batching
is disabled.
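
As an illustration of the two call forms that reach the unbatched
kvfree_call_rcu() below, consider this minimal sketch (not part of the
patch; struct foo and its helpers are hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object embedding the rcu_head needed by the 2-arg form. */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_free(struct foo *f)
{
	/*
	 * Two-argument form: passes &f->rcu as the head, so the
	 * unbatched kvfree_call_rcu() hands it to call_rcu(); callable
	 * from atomic context.
	 */
	kvfree_rcu(f, rcu);
}

static void foo_free_mightsleep(struct foo *f)
{
	/*
	 * Single-argument form: head is NULL, so the unbatched path
	 * waits with synchronize_rcu() and then kvfree()s the object,
	 * hence the might_sleep() annotation.
	 */
	kvfree_rcu_mightsleep(f);
}

The two-argument form relies on a struct rcu_head embedded in the
object, while the single-argument form works for objects without one,
at the cost of blocking.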

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/slab.h |  2 +-
 mm/Kconfig           |  4 ++++
 mm/slab_common.c     | 45 +++++++++++++++++++++++++--------------------
 3 files changed, 30 insertions(+), 21 deletions(-)

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index bcc62e5656c35c6a3f4caf26fb33d7447dead39a..9faf33734a8eee2425b90e679c0457ab459422a3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1083,7 +1083,7 @@ extern void kvfree_sensitive(const void *addr, size_t len);
 
 unsigned int kmem_cache_size(struct kmem_cache *s);
 
-#ifdef CONFIG_TINY_RCU
+#ifndef CONFIG_KFREE_RCU_BATCHED
 static inline void kvfree_rcu_barrier(void)
 {
 	rcu_barrier();
diff --git a/mm/Kconfig b/mm/Kconfig
index 84000b01680869801a10f56f06d0c43d6521a8d2..e513308a4aed640ee556ecb5793c7f3f195bbcae 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -242,6 +242,10 @@ menu "Slab allocator options"
 config SLUB
 	def_bool y
 
+config KFREE_RCU_BATCHED
+	def_bool y
+	depends on !SLUB_TINY && !TINY_RCU
+
 config SLUB_TINY
 	bool "Configure for minimal memory footprint"
 	depends on EXPERT
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f13d2c901daf1419993620459fbd5845eecb85f1..9f6d66313afc6684bdc0f32908fe01c83c60f283 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1284,6 +1284,28 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
 
+#ifndef CONFIG_KFREE_RCU_BATCHED
+
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+{
+	if (head) {
+		kasan_record_aux_stack_noalloc(ptr);
+		call_rcu(head, kvfree_rcu_cb);
+		return;
+	}
+
+	// kvfree_rcu(one_arg) call.
+	might_sleep();
+	synchronize_rcu();
+	kvfree(ptr);
+}
+
+void __init kvfree_rcu_init(void)
+{
+}
+
+#else /* CONFIG_KFREE_RCU_BATCHED */
+
 /*
  * This rcu parameter is runtime-read-only. It reflects
  * a minimum allowed number of objects which can be cached
@@ -1858,24 +1880,6 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 	return true;
 }
 
-#ifdef CONFIG_TINY_RCU
-
-void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
-	if (head) {
-		kasan_record_aux_stack_noalloc(ptr);
-		call_rcu(head, kvfree_rcu_cb);
-		return;
-	}
-
-	// kvfree_rcu(one_arg) call.
-	might_sleep();
-	synchronize_rcu();
-	kvfree(ptr);
-}
-
-#else /* !CONFIG_TINY_RCU */
-
 static enum hrtimer_restart
 schedule_page_work_fn(struct hrtimer *t)
 {
@@ -2084,8 +2088,6 @@ void kvfree_rcu_barrier(void)
 }
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
-#endif /* !CONFIG_TINY_RCU */
-
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -2175,3 +2177,6 @@ void __init kvfree_rcu_init(void)
 
 	shrinker_register(kfree_rcu_shrinker);
 }
+
+#endif /* CONFIG_KFREE_RCU_BATCHED */
+