
[23/62] mm/slub: Convert slab_alloc_node() to use a struct slab

Message ID 20211004134650.4031813-24-willy@infradead.org
Series Separate struct slab from struct page

Commit Message

Matthew Wilcox Oct. 4, 2021, 1:46 p.m. UTC
Adds a little type safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
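
Not part of the patch: the "type safety" gained here is that a pointer to a slab's private state now has its own type, so the compiler rejects code that mixes it up with a plain struct page. A minimal sketch of the idea, using hypothetical simplified stand-in types (not the real mm/ definitions; in the kernel, struct slab reuses the memory of a struct page rather than embedding one as done here):

/* Illustration only: hypothetical, simplified stand-ins for the real
 * mm/ types.
 */
struct page { unsigned long flags; };

struct slab {
	struct page page;
	void *freelist;
};

/* Explicit conversion back to the underlying page, analogous to the
 * slab_page() call left in the last hunk below.
 */
static inline struct page *slab_page(struct slab *slab)
{
	return &slab->page;
}

static inline void *slab_first_free(struct slab *slab)
{
	return slab->freelist;
}

/*
 * With distinct types, an accidental mix-up no longer compiles:
 *
 *	struct page *page = ...;
 *	slab_first_free(page);		<-- error: incompatible pointer type
 *
 * whereas with struct page used for both roles it would.
 */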

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 29703bba0a7f..fd04aa96602c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3112,7 +3112,7 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 {
 	void *object;
 	struct kmem_cache_cpu *c;
-	struct page *page;
+	struct slab *slab;
 	unsigned long tid;
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
@@ -3144,9 +3144,9 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	/*
 	 * Irqless object alloc/free algorithm used here depends on sequence
 	 * of fetching cpu_slab's data. tid should be fetched before anything
-	 * on c to guarantee that object and page associated with previous tid
+	 * on c to guarantee that object and slab associated with previous tid
 	 * won't be used with current tid. If we fetch tid first, object and
-	 * page could be one associated with next tid and our alloc/free
+	 * slab could be one associated with next tid and our alloc/free
 	 * request will be failed. In this case, we will retry. So, no problem.
 	 */
 	barrier();
@@ -3159,7 +3159,7 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	 */
 
 	object = c->freelist;
-	page = slab_page(c->slab);
+	slab = c->slab;
 	/*
 	 * We cannot use the lockless fastpath on PREEMPT_RT because if a
 	 * slowpath has taken the local_lock_irqsave(), it is not protected
@@ -3168,7 +3168,7 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	 * there is a suitable cpu freelist.
 	 */
 	if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
-	    unlikely(!object || !page || !node_match(page, node))) {
+	    unlikely(!object || !slab || !node_match(slab_page(slab), node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
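
One note on the last hunk: node_match() has not been converted to take a struct slab at this point in the series, which is why the call site goes through slab_page(). From memory (simplified, a sketch rather than a quote of mm/slub.c), the helper it feeds is shaped like:

static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
		return 0;
#endif
	return 1;
}

so the only thing it needs from the slab is the node of its backing page, which slab_page() exposes without giving up the new type elsewhere in the fast path.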