@@ -1379,7 +1379,7 @@ static struct page *kmem_getpages(struct
 		return NULL;
 	}

-	charge_slab_page(page, cachep->gfporder, cachep);
+	account_slab_page(page, cachep->gfporder, cachep);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1403,7 +1403,7 @@ static void kmem_freepages(struct kmem_c

 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	uncharge_slab_page(page, order, cachep);
+	unaccount_slab_page(page, order, cachep);
 	__free_pages(page, order);
 }
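The two mm/slab.c hunks above are pure renames at the slab page lifecycle boundaries: kmem_getpages() accounts a page right after a successful allocation, and kmem_freepages() unaccounts it just before handing the pages back. Below is a minimal user-space sketch of that pairing; it assumes nothing about the kernel beyond the PAGE_SIZE << order delta visible in the hunks, and the counter and helper names are invented for illustration.

/*
 * Not kernel code: a user-space model of the pairing shown above. The
 * counter (slab_bytes) and the model_* helpers are made up; only the
 * PAGE_SIZE << order delta mirrors account_slab_page()/unaccount_slab_page().
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static long slab_bytes;	/* stand-in for the per-node slab vmstat counter */

static void *model_getpages(unsigned int order)
{
	void *page = malloc(PAGE_SIZE << order);	/* __alloc_pages_node() stand-in */

	if (!page)
		return NULL;
	slab_bytes += PAGE_SIZE << order;		/* account_slab_page() */
	return page;
}

static void model_freepages(void *page, unsigned int order)
{
	slab_bytes -= PAGE_SIZE << order;		/* unaccount_slab_page() */
	free(page);					/* __free_pages() */
}

int main(void)
{
	void *page = model_getpages(2);			/* one order-2 (4-page) "slab" */

	if (!page)
		return 1;
	printf("accounted %ld bytes\n", slab_bytes);	/* 16384 */
	model_freepages(page, 2);
	assert(slab_bytes == 0);			/* paired calls keep the counter balanced */
	return 0;
}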
@@ -423,15 +423,15 @@ static inline struct kmem_cache *virt_to
 	return page->slab_cache;
 }

-static __always_inline void charge_slab_page(struct page *page, int order,
-					     struct kmem_cache *s)
+static __always_inline void account_slab_page(struct page *page, int order,
+					      struct kmem_cache *s)
 {
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 }

-static __always_inline void uncharge_slab_page(struct page *page, int order,
-					       struct kmem_cache *s)
+static __always_inline void unaccount_slab_page(struct page *page, int order,
+						struct kmem_cache *s)
 {
 	if (memcg_kmem_enabled())
 		memcg_free_page_obj_cgroups(page);
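The hunk above shows only the first statement of each helper body. For orientation, here is a sketch of how the renamed pair in mm/slab.h reads after the change: the account side is exactly what the hunk shows, while the matching mod_node_page_state() decrement in unaccount_slab_page() is not visible in the hunk and is assumed here by symmetry with the account side.

/*
 * Sketch, not a verbatim copy of mm/slab.h after this patch.
 */
static __always_inline void account_slab_page(struct page *page, int order,
					      struct kmem_cache *s)
{
	/* grow the per-node slab vmstat counter by the slab's size in bytes */
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab_page(struct page *page, int order,
						struct kmem_cache *s)
{
	/* release the per-object cgroup vector before the page goes back */
	if (memcg_kmem_enabled())
		memcg_free_page_obj_cgroups(page);

	/* assumed by symmetry: shrink the same counter by the same amount */
	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}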
@@ -1621,7 +1621,7 @@ static inline struct page *alloc_slab_pa
 		page = __alloc_pages_node(node, flags, order);

 	if (page)
-		charge_slab_page(page, order, s);
+		account_slab_page(page, order, s);

 	return page;
 }
@@ -1844,7 +1844,7 @@ static void __free_slab(struct kmem_cach
 	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	uncharge_slab_page(page, order, s);
+	unaccount_slab_page(page, order, s);
 	__free_pages(page, order);
 }
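Note that the freeing paths above update two counters in two different units: reclaim_state->reclaimed_slab is bumped by a page count (1 << order in kmem_freepages(), the pages variable in __free_slab()), while the (un)account_slab_page() helpers work in bytes (PAGE_SIZE << order). A trivial standalone illustration of that relationship, with PAGE_SIZE hard-coded to 4096 purely for the example:

/* Not kernel code: order-to-pages and order-to-bytes arithmetic used above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned int order;

	for (order = 0; order <= 3; order++)
		printf("order %u: %lu pages (1 << order), %lu bytes (PAGE_SIZE << order)\n",
		       order, 1UL << order, PAGE_SIZE << order);
	return 0;
}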