[RFC,4/6] mm, slab: simplify kmem_cache_release()

Message ID 20240715-b4-slab-kfree_rcu-destroy-v1-4-46b2984c2205@suse.cz (mailing list archive)
State Superseded
Series mm, slub: handle pending kfree_rcu() in kmem_cache_destroy() | expand

Commit Message

Vlastimil Babka July 15, 2024, 8:29 p.m. UTC
kfence_shutdown_cache() is now always called just before
kmem_cache_release(), so move it there.

Also replace the two variants of the function with a single one by
using __is_defined(SLAB_SUPPORTS_SYSFS).
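
For reference, a minimal userspace sketch of the __is_defined() idiom,
adapted from the helpers in include/linux/kconfig.h. FEATURE_FOO is a
made-up macro name used only for illustration; the trick evaluates to 1
when the tested macro is defined to 1, and to 0 otherwise:

#include <stdio.h>

/* simplified copies of the include/linux/kconfig.h helpers */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define FEATURE_FOO 1	/* comment this out to exercise the other branch */

int main(void)
{
	/*
	 * Unlike #ifdef, both branches are parsed and type-checked;
	 * the compiler then discards the branch guarded by a constant 0.
	 */
	if (__is_defined(FEATURE_FOO))
		printf("FEATURE_FOO is enabled\n");
	else
		printf("FEATURE_FOO is disabled\n");
	return 0;
}

The upside over an #ifdef/#else pair is that both branches are always
compile-tested, while the generated code stays the same because the
constant-false branch is eliminated.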

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slab_common.c | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

Patch

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2eef5ad37fa7..57962e1a5a86 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -486,7 +486,6 @@  kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
 }
 EXPORT_SYMBOL(kmem_buckets_create);
 
-#ifdef SLAB_SUPPORTS_SYSFS
 /*
  * For a given kmem_cache, kmem_cache_destroy() should only be called
  * once or there will be a use-after-free problem. The actual deletion
@@ -495,18 +494,12 @@  EXPORT_SYMBOL(kmem_buckets_create);
  */
 static void kmem_cache_release(struct kmem_cache *s)
 {
-	if (slab_state >= FULL) {
+	kfence_shutdown_cache(s);
+	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
 		sysfs_slab_release(s);
-	} else {
+	else
 		slab_kmem_cache_release(s);
-	}
-}
-#else
-static void kmem_cache_release(struct kmem_cache *s)
-{
-	slab_kmem_cache_release(s);
 }
-#endif
 
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 {
@@ -531,10 +524,8 @@  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 
 	rcu_barrier();
 
-	list_for_each_entry_safe(s, s2, &to_destroy, list) {
-		kfence_shutdown_cache(s);
+	list_for_each_entry_safe(s, s2, &to_destroy, list)
 		kmem_cache_release(s);
-	}
 }
 
 void slab_kmem_cache_release(struct kmem_cache *s)
@@ -591,7 +582,6 @@  void kmem_cache_destroy(struct kmem_cache *s)
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 		mutex_unlock(&slab_mutex);
 	} else {
-		kfence_shutdown_cache(s);
 		kmem_cache_release(s);
 	}
 }