@@ -2298,25 +2298,25 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
}
/*
- * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
* unfreezes the slabs and puts it on the proper list.
* Assumes the slab has been already safely taken away from kmem_cache_cpu
* by the caller.
*/
-static void deactivate_slab(struct kmem_cache *s, struct page *page,
+static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
void *freelist)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ struct kmem_cache_node *n = get_node(s, slab_nid(slab));
int lock = 0, free_delta = 0;
enum slab_modes l = M_NONE, m = M_NONE;
void *nextfree, *freelist_iter, *freelist_tail;
int tail = DEACTIVATE_TO_HEAD;
unsigned long flags = 0;
- struct page new;
- struct page old;
+ struct slab new;
+ struct slab old;
- if (page->freelist) {
+ if (slab->freelist) {
stat(s, DEACTIVATE_REMOTE_FREES);
tail = DEACTIVATE_TO_TAIL;
}
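The header comment above says deactivate_slab() merges the cpu's freelist with the slab's freelist. A minimal user-space sketch of that splice, using hypothetical toy names (toy_object, toy_splice) rather than the kernel's types: the detached per-cpu list is linked in front of the slab's list by pointing its tail at the old head.

/*
 * Toy, user-space sketch of the freelist splice the header comment
 * describes.  All names here are hypothetical illustrations only,
 * not the kernel implementation.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_object {
	struct toy_object *next;	/* stands in for the free pointer */
};

/* returns the new head: cpu_head ... cpu_tail -> old slab freelist */
static struct toy_object *toy_splice(struct toy_object *cpu_head,
				     struct toy_object *cpu_tail,
				     struct toy_object *slab_head)
{
	if (!cpu_head)			/* nothing to splice */
		return slab_head;
	cpu_tail->next = slab_head;
	return cpu_head;
}

int main(void)
{
	struct toy_object a = { NULL }, b = { NULL }, c = { NULL };
	struct toy_object *head;

	a.next = &b;			/* per-cpu freelist: a -> b */
	head = toy_splice(&a, &b, &c);	/* slab freelist was just: c */

	for (; head; head = head->next)
		printf("object %p\n", (void *)head);
	return 0;
}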
@@ -2335,7 +2335,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* 'freelist_iter' is already corrupted. So isolate all objects
* starting at 'freelist_iter' by skipping them.
*/
- if (freelist_corrupted(s, page, &freelist_iter, nextfree))
+ if (freelist_corrupted(s, slab_page(slab), &freelist_iter, nextfree))
break;
freelist_tail = freelist_iter;
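The corruption check above guards the "stage one" walk over the detached per-cpu freelist, which counts the free objects and remembers the list tail for the later splice. A toy user-space sketch of that walk, assuming hypothetical names (toy_object, toy_walk_freelist) and omitting the real freelist_corrupted() validation:

/*
 * Toy sketch of the stage-one walk: count the objects on a detached
 * per-cpu freelist and remember its tail so it can later be spliced
 * onto the slab's own freelist.  Not the kernel implementation.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_object {
	struct toy_object *next;	/* stands in for the free pointer */
};

static void toy_walk_freelist(struct toy_object *freelist)
{
	struct toy_object *iter = freelist;
	struct toy_object *tail = NULL;
	int free_delta = 0;

	while (iter) {
		/* the real walk validates 'iter' here and bails out
		 * if the free pointer looks corrupted */
		tail = iter;
		free_delta++;
		iter = iter->next;
	}

	printf("counted %d free objects, tail=%p\n", free_delta, (void *)tail);
}

int main(void)
{
	struct toy_object objs[3] = {
		{ .next = &objs[1] },
		{ .next = &objs[2] },
		{ .next = NULL },
	};

	toy_walk_freelist(&objs[0]);
	return 0;
}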
@@ -2345,25 +2345,25 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
}
/*
- * Stage two: Unfreeze the page while splicing the per-cpu
- * freelist to the head of page's freelist.
+ * Stage two: Unfreeze the slab while splicing the per-cpu
+ * freelist to the head of slab's freelist.
*
- * Ensure that the page is unfrozen while the list presence
+ * Ensure that the slab is unfrozen while the list presence
* reflects the actual number of objects during unfreeze.
*
* We setup the list membership and then perform a cmpxchg
- * with the count. If there is a mismatch then the page
- * is not unfrozen but the page is on the wrong list.
+ * with the count. If there is a mismatch then the slab
+ * is not unfrozen but the slab is on the wrong list.
*
* Then we restart the process which may have to remove
- * the page from the list that we just put it on again
+ * the slab from the list that we just put it on again
* because the number of objects in the slab may have
* changed.
*/
redo:
- old.freelist = READ_ONCE(page->freelist);
- old.counters = READ_ONCE(page->counters);
+ old.freelist = READ_ONCE(slab->freelist);
+ old.counters = READ_ONCE(slab->counters);
VM_BUG_ON(!old.frozen);
/* Determine target state of the slab */
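The "stage two" comment above describes an optimistic retry protocol: read the slab's old (freelist, counters) state, compute the desired new state, then try to install it with a single atomic compare-and-exchange and restart if another CPU raced. A toy user-space sketch of that pattern, using one packed counter word and hypothetical names (toy_slab, toy_unfreeze); it only mirrors the shape of cmpxchg_double_slab(), which actually swaps the freelist and counters together:

/*
 * Toy retry loop: unfreeze the slab and drop the in-use count in one
 * atomic update, retrying if the word changed underneath us.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct toy_slab {
	_Atomic uint64_t counters;	/* packed: inuse count + frozen bit */
};

#define TOY_FROZEN	(1ULL << 63)

static void toy_unfreeze(struct toy_slab *slab, unsigned int freed)
{
	uint64_t old, new;

	do {
		old = atomic_load(&slab->counters);
		new = (old & ~TOY_FROZEN) - freed; /* unfreeze + drop inuse */
		/* restart if another thread changed 'counters' meanwhile */
	} while (!atomic_compare_exchange_weak(&slab->counters, &old, new));
}

int main(void)
{
	struct toy_slab slab = { .counters = TOY_FROZEN | 5 };

	toy_unfreeze(&slab, 2);
	printf("counters now %llu\n",
	       (unsigned long long)atomic_load(&slab.counters));
	return 0;
}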
@@ -2385,7 +2385,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
lock = 1;
/*
* Taking the spinlock removes the possibility
- * that acquire_slab() will see a slab page that
+ * that acquire_slab() will see a slab that
* is frozen
*/
spin_lock_irqsave(&n->list_lock, flags);
@@ -2405,18 +2405,18 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
if (l != m) {
if (l == M_PARTIAL)
- remove_partial(n, page);
+ remove_partial(n, slab_page(slab));
else if (l == M_FULL)
- remove_full(s, n, page);
+ remove_full(s, n, slab_page(slab));
if (m == M_PARTIAL)
- add_partial(n, page, tail);
+ add_partial(n, slab_page(slab), tail);
else if (m == M_FULL)
- add_full(s, n, page);
+ add_full(s, n, slab_page(slab));
}
l = m;
- if (!cmpxchg_double_slab(s, page,
+ if (!cmpxchg_double_slab(s, slab_page(slab),
old.freelist, old.counters,
new.freelist, new.counters,
"unfreezing slab"))
@@ -2431,7 +2431,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
stat(s, DEACTIVATE_FULL);
else if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
- discard_slab(s, page);
+ discard_slab(s, slab_page(slab));
stat(s, FREE_SLAB);
}
}
@@ -2603,7 +2603,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
if (slab) {
- deactivate_slab(s, slab_page(slab), freelist);
+ deactivate_slab(s, slab, freelist);
stat(s, CPUSLAB_FLUSH);
}
}
@@ -2619,7 +2619,7 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
c->tid = next_tid(c->tid);
if (slab) {
- deactivate_slab(s, slab_page(slab), freelist);
+ deactivate_slab(s, slab, freelist);
stat(s, CPUSLAB_FLUSH);
}
@@ -2961,7 +2961,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c->slab = NULL;
c->freelist = NULL;
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- deactivate_slab(s, slab_page(slab), freelist);
+ deactivate_slab(s, slab, freelist);
new_slab:
@@ -3043,7 +3043,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- deactivate_slab(s, slab_page(flush_slab), flush_freelist);
+ deactivate_slab(s, flush_slab, flush_freelist);
stat(s, CPUSLAB_FLUSH);
@@ -3055,7 +3055,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
return_single:
- deactivate_slab(s, slab_page(slab), get_freepointer(s, freelist));
+ deactivate_slab(s, slab, get_freepointer(s, freelist));
return freelist;
}
Improves type safety and removes calls to slab_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 54 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)
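As a rough illustration of the type-safety point in the changelog, a standalone sketch with hypothetical toy types (not the kernel's): once the helper takes a struct slab pointer, the compiler rejects a plain page at the call site, and the slab_page() conversions there become unnecessary.

/*
 * Minimal sketch of the type-safety argument; names are invented for
 * illustration and do not correspond to the kernel implementation.
 */
#include <stdio.h>

struct toy_page { int flags; };
struct toy_slab { struct toy_page page; int objects; };

/* old style: any page is accepted, slab-ness is only a convention */
static void deactivate_by_page(struct toy_page *page)
{
	printf("page %p deactivated\n", (void *)page);
}

/* new style: only a slab can be passed, enforced by the compiler */
static void deactivate_by_slab(struct toy_slab *slab)
{
	printf("slab %p with %d objects deactivated\n",
	       (void *)slab, slab->objects);
}

int main(void)
{
	struct toy_page plain_page = { 0 };
	struct toy_slab slab = { .objects = 32 };

	deactivate_by_page(&plain_page);	/* compiles, even though wrong */
	deactivate_by_slab(&slab);		/* type checked */
	/* deactivate_by_slab(&plain_page);	   would not compile */
	return 0;
}

(The kernel change is purely a type conversion; the sketch only shows why the narrower parameter type catches misuse at compile time.)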