@@ -2044,12 +2044,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
* Returns a list of objects or NULL if it fails.
*/
static inline void *acquire_slab(struct kmem_cache *s,
- struct kmem_cache_node *n, struct page *page,
+ struct kmem_cache_node *n, struct slab *slab,
int mode, int *objects)
{
void *freelist;
unsigned long counters;
- struct page new;
+ struct slab new;

lockdep_assert_held(&n->list_lock);

@@ -2058,12 +2058,12 @@ static inline void *acquire_slab(struct kmem_cache *s,
* The old freelist is the list of objects for the
* per cpu allocation list.
*/
- freelist = page->freelist;
- counters = page->counters;
+ freelist = slab->freelist;
+ counters = slab->counters;
new.counters = counters;
*objects = new.objects - new.inuse;
if (mode) {
- new.inuse = page->objects;
+ new.inuse = slab->objects;
new.freelist = NULL;
} else {
new.freelist = freelist;
@@ -2072,13 +2072,13 @@ static inline void *acquire_slab(struct kmem_cache *s,
VM_BUG_ON(new.frozen);
new.frozen = 1;

- if (!__cmpxchg_double_slab(s, page,
+ if (!__cmpxchg_double_slab(s, slab_page(slab),
freelist, counters,
new.freelist, new.counters,
"acquire_slab"))
return NULL;

- remove_partial(n, page);
+ remove_partial(n, slab_page(slab));
WARN_ON(!freelist);
return freelist;
}
@@ -2119,7 +2119,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!pfmemalloc_match(slab_page(slab), gfpflags))
continue;

- t = acquire_slab(s, n, slab_page(slab), object == NULL, &objects);
+ t = acquire_slab(s, n, slab, object == NULL, &objects);
if (!t)
break;

Improves type safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
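
The type-safety claim is concrete: with the old signature, any struct page
pointer, whether or not it backed a slab, was accepted silently; with the
new one, handing acquire_slab() something that has not been through a
page-to-slab conversion is a compile-time diagnostic. A minimal sketch of
the effect, using simplified stand-in types rather than the kernel's real
definitions:

/* Simplified stand-ins for illustration only; the kernel's types
 * carry many more fields and live in overlapping unions. */
struct page { unsigned long flags; };
struct slab { void *freelist; unsigned long counters; };

/* Post-conversion signature: only a struct slab * is accepted. */
static void *acquire_slab_sketch(struct slab *slab)
{
	return slab->freelist;
}

int main(void)
{
	struct slab s = { 0 };
	struct page p = { 0 };

	acquire_slab_sketch(&s);	/* fine */
	/* acquire_slab_sketch(&p); -- rejected: passing a struct page *
	 * where a struct slab * is expected is an incompatible-pointer-type
	 * diagnostic, instead of compiling and misinterpreting the memory
	 * at runtime. */
	(void)p;
	return 0;
}

The slab_page() calls that remain in the diff mark the current boundary of
the conversion: remove_partial() and __cmpxchg_double_slab() still take a
struct page at this point in the series, so the caller converts explicitly
and visibly.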
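
For readers new to this function, acquire_slab() follows the classic
optimistic pattern: snapshot freelist and counters, build a private copy
with the frozen bit set (and, in "take everything" mode, the freelist
zapped), then publish both words atomically; if another CPU got there
first, the cmpxchg fails and the function returns NULL so the caller can
move on. A rough sketch of the same pattern in C11 atomics, illustrative
only, since the kernel uses a paired-word cmpxchg_double on the real
struct slab fields:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in state; the kernel packs inuse/objects/frozen into
 * 'counters' and updates it together with 'freelist'. */
struct slab_state {
	void *freelist;
	unsigned int inuse;
	unsigned int objects;
	unsigned int frozen;
};

/* Snapshot, modify a private copy, publish atomically.  Returns the
 * old freelist on success, NULL if the slab changed under us. */
static void *acquire_sketch(_Atomic struct slab_state *slot, bool take_all)
{
	struct slab_state old = atomic_load(slot);
	struct slab_state new = old;

	if (take_all) {			/* mode != 0 in acquire_slab() */
		new.inuse = old.objects;
		new.freelist = NULL;
	}
	new.frozen = 1;

	/* Single attempt, no retry loop: on failure the caller simply
	 * gives up on this slab, mirroring acquire_slab()'s NULL return. */
	if (!atomic_compare_exchange_strong(slot, &old, new))
		return NULL;
	return old.freelist;
}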