diff mbox series

[2/2] mm/slab_common: move generic bulk alloc/free functions to SLOB

Message ID 20220614152635.23353-3-42.hyeyoo@gmail.com (mailing list archive)
State New
Headers show
Series slab bulk alloc/free cleanups | expand

Commit Message

Hyeonggon Yoo June 14, 2022, 3:26 p.m. UTC
Now that only SLOB uses __kmem_cache_{alloc,free}_bulk(), move them to
SLOB. No functional change intended.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slab.h        |  9 ---------
 mm/slab_common.c | 27 ---------------------------
 mm/slob.c        | 25 +++++++++++++++++++++----
 3 files changed, 21 insertions(+), 40 deletions(-)
diff mbox series

Patch

diff --git a/mm/slab.h b/mm/slab.h
index db9fb5c8dae7..a6837605e4cc 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -380,15 +380,6 @@  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
-/*
- * Generic implementation of bulk operations
- * These are useful for situations in which the allocator cannot
- * perform optimizations. In that case segments of the object listed
- * may be allocated or freed using these operations.
- */
-void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
-
 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 {
 	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 77c3adf40e50..a6787fd39aa4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -105,33 +105,6 @@  static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
 }
 #endif
 
-void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		if (s)
-			kmem_cache_free(s, p[i]);
-		else
-			kfree(p[i]);
-	}
-}
-
-int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
-								void **p)
-{
-	size_t i;
-
-	for (i = 0; i < nr; i++) {
-		void *x = p[i] = kmem_cache_alloc(s, flags);
-		if (!x) {
-			__kmem_cache_free_bulk(s, i, p);
-			return 0;
-		}
-	}
-	return i;
-}
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
diff --git a/mm/slob.c b/mm/slob.c
index f47811f09aca..f8babd0806ee 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -692,16 +692,33 @@  void kmem_cache_free(struct kmem_cache *c, void *b)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 {
-	__kmem_cache_free_bulk(s, size, p);
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		if (s)
+			kmem_cache_free(s, p[i]);
+		else
+			kfree(p[i]);
+	}
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 								void **p)
 {
-	return __kmem_cache_alloc_bulk(s, flags, size, p);
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+
+		if (!x) {
+			kmem_cache_free_bulk(s, i, p);
+			return 0;
+		}
+	}
+	return i;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);