@@ -762,7 +762,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 			else
 				__count_vm_events(PGINODESTEAL, reap);
 			if (current->reclaim_state)
-				current->reclaim_state->reclaimed_slab += reap;
+				current->reclaim_state->reclaimed_pages += reap;
 		}
 		iput(inode);
 		spin_lock(lru_lock);
@@ -126,10 +126,11 @@ union swap_header {
 
 /*
  * current->reclaim_state points to one of these when a task is running
- * memory reclaim
+ * memory reclaim. It is typically used by shrinkers to report reclaim
+ * information to the main vmscan loop.
  */
 struct reclaim_state {
-	unsigned long reclaimed_slab;
+	unsigned long reclaimed_pages;	/* pages freed by shrinkers */
 };
 
 #ifdef __KERNEL__
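For context between the hunks: this field is written by whichever code frees pages on behalf of a reclaiming task. Below is a minimal sketch of that producer pattern, assuming kernel context (current from linux/sched.h, struct reclaim_state as defined in the hunk above); example_report_freed() is a hypothetical helper invented for illustration, not kernel code:

/*
 * Sketch only: credit pages freed by a shrinker to the reclaiming
 * task, mirroring the slab/slob/slub call sites in this patch.
 * example_report_freed() is a made-up name for illustration.
 */
static void example_report_freed(unsigned long nr_pages)
{
	/* Only tasks that are currently running reclaim carry a reclaim_state. */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_pages += nr_pages;
}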
@@ -1396,7 +1396,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 	page->mapping = NULL;
 
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += 1 << order;
+		current->reclaim_state->reclaimed_pages += 1 << order;
 	uncharge_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }
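Both kmem_freepages() above and slob_free_pages() below credit 1 << order pages: an order-n page allocation spans 2^n contiguous pages, so freeing its backing store frees that many pages at once. A standalone illustration of the arithmetic (the loop and names are made up for this example):

#include <stdio.h>

/* An order-n allocation spans 2^n contiguous pages, hence 1 << order. */
int main(void)
{
	for (unsigned int order = 0; order <= 3; order++)
		printf("order %u -> %lu pages\n", order, 1UL << order);
	return 0;
}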
@@ -208,7 +208,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 static void slob_free_pages(void *b, int order)
 {
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += 1 << order;
+		current->reclaim_state->reclaimed_pages += 1 << order;
 	free_pages((unsigned long)b, order);
 }
@@ -1743,7 +1743,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	page->mapping = NULL;
 	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += pages;
+		current->reclaim_state->reclaimed_pages += pages;
 	uncharge_slab_page(page, order, s);
 	__free_pages(page, order);
 }
@@ -2765,8 +2765,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 
 	if (reclaim_state) {
-		sc->nr_reclaimed += reclaim_state->reclaimed_pages;
-		reclaim_state->reclaimed_pages = 0;
+		sc->nr_reclaimed += reclaim_state->reclaimed_pages;
+		reclaim_state->reclaimed_pages = 0;
 	}
 
 	/* Record the subtree's reclaim efficiency */
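Taken together, the hunks cover one counter with a single produce-then-harvest lifecycle: shrinkers add to reclaimed_pages, and shrink_node() folds the total into sc->nr_reclaimed and resets it. A hedged outline of that lifecycle, assuming kernel context; example_reclaim() and do_reclaim_work() are stand-ins for the real reclaim entry points, which attach current->reclaim_state before calling into shrink_node() above:

/*
 * Hypothetical outline only: how reclaim_state is wired up around a
 * reclaim pass. example_reclaim() and do_reclaim_work() are stand-ins,
 * not functions from this patch.
 */
static unsigned long example_reclaim(struct scan_control *sc)
{
	struct reclaim_state rs = { .reclaimed_pages = 0 };

	current->reclaim_state = &rs;	/* shrinkers account here */
	do_reclaim_work(sc);		/* may free slab and inode pages */
	current->reclaim_state = NULL;

	/* Pages freed indirectly by shrinkers, under the renamed field. */
	return rs.reclaimed_pages;
}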