Convert the one caller to pass a slab instead. Adds a little type
safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4208,20 +4208,20 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	return -EINVAL;
 }
 
-static void list_slab_objects(struct kmem_cache *s, struct page *page,
+static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 			      const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
-	void *addr = page_address(page);
+	void *addr = slab_address(slab);
 	unsigned long flags;
 	unsigned long *map;
 	void *p;
 
-	slab_err(s, page, text, s->name);
-	slab_lock(page, &flags);
+	slab_err(s, slab_page(slab), text, s->name);
+	slab_lock(slab_page(slab), &flags);
 
-	map = get_map(s, page);
-	for_each_object(p, s, addr, page->objects) {
+	map = get_map(s, slab_page(slab));
+	for_each_object(p, s, addr, slab->objects) {
 
 		if (!test_bit(__obj_to_index(s, addr, p), map)) {
 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
@@ -4229,7 +4229,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	put_map(map);
-	slab_unlock(page, &flags);
+	slab_unlock(slab_page(slab), &flags);
 #endif
 }
 
@@ -4250,7 +4250,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			remove_partial(n, slab_page(slab));
 			list_add(&slab->slab_list, &discard);
 		} else {
-			list_slab_objects(s, slab_page(slab),
+			list_slab_objects(s, slab,
 			  "Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
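A note on the "type safety" remark, separate from the patch itself: once
list_slab_objects() takes a struct slab *, handing it a bare struct page *
becomes a compile error instead of a latent bug, and slab_page() is the
explicit, greppable escape hatch for the helpers the diff leaves
unconverted (slab_err(), slab_lock(), get_map()). The stand-alone sketch
below uses simplified stand-in definitions -- not the kernel's real
struct page, struct slab, or helpers -- just to show the shape of the idea.

/*
 * Stand-alone sketch, not the kernel's definitions: struct page,
 * struct slab and the helpers below are simplified stand-ins.
 */
#include <stdio.h>

struct page {
	void *virtual;		/* stand-in for the mapped address */
};

struct slab {
	struct page page;	/* a slab wraps exactly one page here */
	unsigned int objects;
};

/* Transitional helper: get at the underlying page explicitly. */
static struct page *slab_page(struct slab *slab)
{
	return &slab->page;
}

static void *slab_address(struct slab *slab)
{
	return slab->page.virtual;
}

/* Only accepts a slab; a struct page * no longer type-checks. */
static void list_slab_objects(struct slab *slab, const char *text)
{
	printf("%s: %u objects at %p\n", text,
	       slab->objects, slab_address(slab));
}

int main(void)
{
	char mem[64];
	struct slab slab = {
		.page	 = { .virtual = mem },
		.objects = 4,
	};

	list_slab_objects(&slab, "demo");	/* fine */
	/*
	 * list_slab_objects(slab_page(&slab), "demo");
	 * ^ rejected at compile time; with a struct page * parameter
	 * the same mix-up would only surface at runtime.
	 */
	return 0;
}

In the kernel the effect is the same: struct slab is a distinct type
overlaying struct page, so each call site has to say whether it is
working in slab or page terms, and the compiler checks it.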