@@ -2083,7 +2083,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
 				   int drain) { }
 #endif
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
+static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
 
 /*
  * Try to allocate a partial slab from a specific node.
@@ -2110,7 +2110,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
 		void *t;
 
-		if (!pfmemalloc_match(slab_page(slab), gfpflags))
+		if (!pfmemalloc_match(slab, gfpflags))
 			continue;
 
 		t = acquire_slab(s, n, slab, object == NULL, &objects);
@@ -2788,9 +2788,9 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 #endif
 }
 
-static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 {
-	if (unlikely(PageSlabPfmemalloc(page)))
+	if (unlikely(slab_test_pfmemalloc(slab)))
 		return gfp_pfmemalloc_allowed(gfpflags);
 
 	return true;
@@ -3017,7 +3017,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		}
 	}
 
-	if (unlikely(!pfmemalloc_match(slab_page(slab), gfpflags)))
+	if (unlikely(!pfmemalloc_match(slab, gfpflags)))
 		/*
 		 * For !pfmemalloc_match() case we don't load freelist so that
 		 * we don't make further mismatched allocations easier.
Improves type safety and removes calls to slab_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
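
For reference, a sketch of the slab-typed flag helper this patch leans on
(presumably added earlier in the series, in mm/slab.h in the upstream tree;
the exact form there may differ). Since the SlabPfmemalloc flag is stored in
the PG_active bit of the slab's page, the test can be expressed through the
folio flag accessors, which is what lets the slab_page() calls above go away:

/* Sketch only: assumes slab_folio() from the struct slab conversion. */
static inline bool slab_test_pfmemalloc(struct slab *slab)
{
	/* SlabPfmemalloc aliases PG_active on slab pages */
	return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

With pfmemalloc_match() taking a struct slab * directly, a caller can no
longer hand it an arbitrary struct page by mistake; the compiler rejects the
mismatched type instead of silently testing a flag on the wrong object.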