@@ -2013,27 +2013,27 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
* Management of partially allocated slabs.
*/
static inline void
-__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
+__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
{
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
- list_add_tail(&page->slab_list, &n->partial);
+ list_add_tail(&slab->slab_list, &n->partial);
else
- list_add(&page->slab_list, &n->partial);
+ list_add(&slab->slab_list, &n->partial);
}

static inline void add_partial(struct kmem_cache_node *n,
- struct page *page, int tail)
+ struct slab *slab, int tail)
{
lockdep_assert_held(&n->list_lock);
- __add_partial(n, page, tail);
+ __add_partial(n, slab, tail);
}

static inline void remove_partial(struct kmem_cache_node *n,
- struct page *page)
+ struct slab *slab)
{
lockdep_assert_held(&n->list_lock);
- list_del(&page->slab_list);
+ list_del(&slab->slab_list);
n->nr_partial--;
}
@@ -2078,7 +2078,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
"acquire_slab"))
return NULL;
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
WARN_ON(!freelist);
return freelist;
}
@@ -2405,12 +2405,12 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
if (l != m) {
if (l == M_PARTIAL)
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
else if (l == M_FULL)
remove_full(s, n, slab_page(slab));

if (m == M_PARTIAL)
- add_partial(n, slab_page(slab), tail);
+ add_partial(n, slab, tail);
else if (m == M_FULL)
add_full(s, n, slab_page(slab));
}
@@ -2479,7 +2479,7 @@ static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab)
slab->next = unusable;
unusable = slab;
} else {
- add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
+ add_partial(n, slab, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
}
@@ -3367,7 +3367,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
*/
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
remove_full(s, n, slab_page(slab));
- add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
+ add_partial(n, slab, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
@@ -3378,7 +3378,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
/*
* Slab on the partial list.
*/
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
stat(s, FREE_REMOVE_PARTIAL);
} else {
/* Slab must be on the full list */
@@ -3922,7 +3922,7 @@ static void early_kmem_cache_node_alloc(int node)
* No locks need to be taken here as it has just been
* initialized and there is no concurrent access.
*/
- __add_partial(n, slab_page(slab), DEACTIVATE_TO_HEAD);
+ __add_partial(n, slab, DEACTIVATE_TO_HEAD);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -4180,7 +4180,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
#endif

/*
- * The larger the object size is, the more pages we want on the partial
+ * The larger the object size is, the more slabs we want on the partial
* list to avoid pounding the page allocator excessively.
*/
set_min_partial(s, ilog2(s->size) / 2);
@@ -4247,7 +4247,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
spin_lock_irq(&n->list_lock);
list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
if (!slab->inuse) {
- remove_partial(n, slab_page(slab));
+ remove_partial(n, slab);
list_add(&slab->slab_list, &discard);
} else {
list_slab_objects(s, slab,
Convert __add_partial(), add_partial() and remove_partial(). Improves
type safety and removes calls to slab_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
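To make the type-safety point concrete, here is a minimal userspace sketch. The struct definitions, the slab_page() helper and the remove_partial_old()/remove_partial_new() functions below are simplified stand-ins invented for illustration, not the kernel's real code; they only show why a helper that takes struct slab * directly needs no conversion call and lets the compiler reject anything that is not a slab.

/*
 * Toy sketch of the type-safety argument. Not the kernel's definitions:
 * in the real series, struct slab is laid out over the same memory as
 * struct page, and slab_page() converts between the two pointer types.
 */
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Stand-in for struct page: only the field the partial list uses. */
struct page {
	struct list_head slab_list;
};

/* Stand-in for struct slab, which shares that memory in the kernel. */
struct slab {
	struct list_head slab_list;
};

/* Simplified stand-in for the kernel's slab_page() conversion helper. */
static inline struct page *slab_page(struct slab *slab)
{
	return (struct page *)slab;
}

/* Before the patch: callers holding a struct slab * had to convert. */
static void remove_partial_old(struct page *page)
{
	(void)page;	/* would unlink page->slab_list here */
}

/* After the patch: the prototype itself says only a slab may be passed. */
static void remove_partial_new(struct slab *slab)
{
	(void)slab;	/* would unlink slab->slab_list here */
}

int main(void)
{
	struct slab s = { .slab_list = { NULL, NULL } };

	remove_partial_old(slab_page(&s));	/* conversion at every call site */
	remove_partial_new(&s);			/* direct; handing it a plain
						 * struct page * would draw an
						 * incompatible-pointer diagnostic */
	return 0;
}

After the conversion, the prototypes themselves document that the per-node partial list links slabs, and any caller still holding only a struct page * has to convert explicitly, which keeps the remaining boundaries between the two types easy to audit.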