
[41/62] mm/slub: Convert check_slab() to struct slab

Message ID 20211004134650.4031813-42-willy@infradead.org (mailing list archive)
State New
Series Separate struct slab from struct page

Commit Message

Matthew Wilcox Oct. 4, 2021, 1:46 p.m. UTC
Also convert slab_pad_check() to struct slab.  Improves type safety
and pushes down a few calls to slab_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
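For context, the conversion leans on the struct slab accessors introduced earlier in the series: slab_address(), slab_size(), slab_order(), slab_test_cache() and slab_page(). The snippet below is only a minimal sketch of how such wrappers are typically layered over the underlying struct page; the bodies are illustrative assumptions, not the exact definitions from the series.

```c
/*
 * Sketch of the struct slab accessors this patch relies on.
 * The real definitions live earlier in the series; these bodies
 * only illustrate the intended layering over struct page.
 */
static inline struct page *slab_page(struct slab *slab)
{
	/* struct slab overlays struct page in this series */
	return (struct page *)slab;
}

static inline void *slab_address(struct slab *slab)
{
	return page_address(slab_page(slab));
}

static inline unsigned int slab_order(struct slab *slab)
{
	return compound_order(slab_page(slab));
}

static inline unsigned long slab_size(struct slab *slab)
{
	return page_size(slab_page(slab));
}

static inline bool slab_test_cache(struct slab *slab)
{
	return PageSlab(slab_page(slab));
}
```

With wrappers of this shape, check_slab() and slab_pad_check() can take a struct slab directly, and the remaining slab_page() calls sink down to the few places (slab_err(), slab_lock()) that still need a struct page.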

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 10db0ce7fe2a..b1122b8cb36f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -992,8 +992,8 @@  static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
 			p + off, POISON_INUSE, size_from_object(s) - off);
 }
 
-/* Check the pad bytes at the end of a slab page */
-static int slab_pad_check(struct kmem_cache *s, struct page *page)
+/* Check the pad bytes at the end of a slab */
+static int slab_pad_check(struct kmem_cache *s, struct slab *slab)
 {
 	u8 *start;
 	u8 *fault;
@@ -1005,8 +1005,8 @@  static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	if (!(s->flags & SLAB_POISON))
 		return 1;
 
-	start = page_address(page);
-	length = page_size(page);
+	start = slab_address(slab);
+	length = slab_size(slab);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1021,7 +1021,7 @@  static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	while (end > fault && end[-1] == POISON_INUSE)
 		end--;
 
-	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
+	slab_err(s, slab_page(slab), "Padding overwritten. 0x%p-0x%p @offset=%tu",
 			fault, end - 1, fault - start);
 	print_section(KERN_ERR, "Padding ", pad, remainder);
 
@@ -1085,28 +1085,28 @@  static int check_object(struct kmem_cache *s, struct slab *slab,
 	return 1;
 }
 
-static int check_slab(struct kmem_cache *s, struct page *page)
+static int check_slab(struct kmem_cache *s, struct slab *slab)
 {
 	int maxobj;
 
-	if (!PageSlab(page)) {
-		slab_err(s, page, "Not a valid slab page");
+	if (!slab_test_cache(slab)) {
+		slab_err(s, slab_page(slab), "Not a valid slab page");
 		return 0;
 	}
 
-	maxobj = order_objects(compound_order(page), s->size);
-	if (page->objects > maxobj) {
-		slab_err(s, page, "objects %u > max %u",
-			page->objects, maxobj);
+	maxobj = order_objects(slab_order(slab), s->size);
+	if (slab->objects > maxobj) {
+		slab_err(s, slab_page(slab), "objects %u > max %u",
+			slab->objects, maxobj);
 		return 0;
 	}
-	if (page->inuse > page->objects) {
-		slab_err(s, page, "inuse %u > max %u",
-			page->inuse, page->objects);
+	if (slab->inuse > slab->objects) {
+		slab_err(s, slab_page(slab), "inuse %u > max %u",
+			slab->inuse, slab->objects);
 		return 0;
 	}
-	/* Slab_pad_check fixes things up after itself */
-	slab_pad_check(s, page);
+	/* slab_pad_check fixes things up after itself */
+	slab_pad_check(s, slab);
 	return 1;
 }
 
@@ -1263,7 +1263,7 @@  void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 static inline int alloc_consistency_checks(struct kmem_cache *s,
 					struct slab *slab, void *object)
 {
-	if (!check_slab(s, slab_page(slab)))
+	if (!check_slab(s, slab))
 		return 0;
 
 	if (!check_valid_pointer(s, slab_page(slab), object)) {
@@ -1355,7 +1355,7 @@  static noinline int free_debug_processing(
 	slab_lock(slab_page(slab), &flags2);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-		if (!check_slab(s, slab_page(slab)))
+		if (!check_slab(s, slab))
 			goto out;
 	}
 
@@ -1611,7 +1611,7 @@  static inline int free_debug_processing(
 	void *head, void *tail, int bulk_cnt,
 	unsigned long addr) { return 0; }
 
-static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
+static inline int slab_pad_check(struct kmem_cache *s, struct slab *slab)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct slab *slab,
 			void *object, u8 val) { return 1; }
@@ -1969,7 +1969,7 @@  static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
 		void *p;
 
-		slab_pad_check(s, slab_page(slab));
+		slab_pad_check(s, slab);
 		for_each_object(p, s, slab_address(slab), slab->objects)
 			check_object(s, slab, p, SLUB_RED_INACTIVE);
 	}
@@ -4959,7 +4959,7 @@  static void validate_slab(struct kmem_cache *s, struct slab *slab,
 
 	slab_lock(slab_page(slab), &flags);
 
-	if (!check_slab(s, slab_page(slab)) || !on_freelist(s, slab, NULL))
+	if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
 		goto unlock;
 
 	/* Now we know that a valid freelist exists */