
[v3,76/76] mm: memcontrol: rename memcg_cache_id to memcg_kmem_id

Message ID 20210914072938.6440-77-songmuchun@bytedance.com (mailing list archive)
State New, archived
Series: Optimize list lru memory consumption

Commit Message

Muchun Song Sept. 14, 2021, 7:29 a.m. UTC
memcg_cache_id() was introduced by commit 2633d7a02823 ("slab/slub:
consider a memcg parameter in kmem_create_cache") and was used to index
into the kmem_cache->memcg_params->memcg_caches array. Since that array
was removed by commit 9855609bde03 ("mm: memcg/slab: use a single set
of kmem_caches for all accounted allocations"), the name no longer needs
to reflect anything cache related. Rename it to memcg_kmem_id() to make
clear that it is about kmem accounting.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/memcontrol.h |  4 ++--
 mm/list_lru.c              | 14 +++++++-------
 2 files changed, 9 insertions(+), 9 deletions(-)
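
As a quick illustration of the pattern the rename is about, here is a
standalone userspace sketch (not kernel code and not part of this patch;
the structure and variable names below are simplified stand-ins): the
helper only exposes the memcg's kmem id, and callers use that id, after
checking for -1, to index per-memcg LRU storage rather than any
kmem_cache array.

#include <stdio.h>

/* Minimal stand-ins for the kernel structures; names are illustrative. */
struct mem_cgroup {
	int kmemcg_id;
};

struct list_lru_memcg {
	long nr_items;
};

/* Mirrors the renamed helper: the id is about kmem accounting, not caches. */
static int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

int main(void)
{
	struct list_lru_memcg lrus[4] = { { 3 }, { 7 }, { 0 }, { 42 } };
	struct mem_cgroup cg = { .kmemcg_id = 1 };

	int idx = memcg_kmem_id(&cg);
	if (idx >= 0)
		printf("nr_items for memcg %d: %ld\n", idx, lrus[idx].nr_items);

	/* A NULL memcg (root) yields -1, so callers must check before indexing. */
	printf("root id: %d\n", memcg_kmem_id(NULL));
	return 0;
}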

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 33f6ec4783f8..6541ec768a60 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1713,7 +1713,7 @@  static inline void memcg_kmem_uncharge_page(struct page *page, int order)
  * A helper for accessing memcg's kmem_id, used for getting
  * corresponding LRU lists.
  */
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
 {
 	return memcg ? memcg->kmemcg_id : -1;
 }
@@ -1751,7 +1751,7 @@  static inline bool memcg_kmem_enabled(void)
 	return false;
 }
 
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
 {
 	return -1;
 }
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 371097ee2485..8fb38dee0e99 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -74,7 +74,7 @@  list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
 	if (!memcg)
 		goto out;
 
-	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
+	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
 out:
 	if (memcg_ptr)
 		*memcg_ptr = memcg;
@@ -181,7 +181,7 @@  unsigned long list_lru_count_one(struct list_lru *lru,
 	long count = 0;
 
 	rcu_read_lock();
-	l = list_lru_from_memcg_idx(lru, nid, memcg_cache_id(memcg));
+	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
 	if (l)
 		count = READ_ONCE(l->nr_items);
 	rcu_read_unlock();
@@ -273,7 +273,7 @@  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 	unsigned long ret;
 
 	spin_lock(&nlru->lock);
-	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
+	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
 				  cb_arg, nr_to_walk);
 	spin_unlock(&nlru->lock);
 	return ret;
@@ -289,7 +289,7 @@  list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 	unsigned long ret;
 
 	spin_lock_irq(&nlru->lock);
-	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg), isolate,
+	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
 				  cb_arg, nr_to_walk);
 	spin_unlock_irq(&nlru->lock);
 	return ret;
@@ -463,7 +463,7 @@  void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
 static bool memcg_list_lru_skip_alloc(struct list_lru *lru,
 				      struct mem_cgroup *memcg)
 {
-	int idx = memcg_cache_id(memcg);
+	int idx = memcg_kmem_id(memcg);
 
 	if (unlikely(idx < 0) || xa_load(&lru->xa, idx))
 		return true;
@@ -518,7 +518,7 @@  int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t g
 
 	xas_lock_irqsave(&xas, flags);
 	while (i--) {
-		int index = memcg_cache_id(table[i].memcg);
+		int index = memcg_kmem_id(table[i].memcg);
 		struct list_lru_memcg *mlru = table[i].mlru;
 
 		xas_set(&xas, index);
@@ -538,7 +538,7 @@  int list_lru_memcg_alloc(struct list_lru *lru, struct mem_cgroup *memcg, gfp_t g
 				 * memcg id. More details see the comments
 				 * in memcg_reparent_list_lrus().
 				 */
-				index = memcg_cache_id(table[i].memcg);
+				index = memcg_kmem_id(table[i].memcg);
 				if (index < 0)
 					xas_set_err(&xas, 0);
 				else if (!xas_error(&xas) && index != xas.xa_index)