[6/9] mm: list_lru: support for shrinking list lru

Message ID 20210428094949.43579-7-songmuchun@bytedance.com
State New, archived
Series Shrink the list lru size on memory cgroup removal

Commit Message

Muchun Song April 28, 2021, 9:49 a.m. UTC
Currently, memcg_update_all_list_lrus() can only increase the size of all
the list lrus. This patch adds the ability to shrink their size as well,
which saves memory when the user wants a smaller size.
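
To make the design concrete, below is a minimal userspace sketch of the
"shrink never fails" pattern the patch relies on. This is not the kernel
code: struct lru_array and lru_array_shrink() are hypothetical names, and
malloc/free stand in for kvmalloc/kvfree and the RCU machinery. The point
it illustrates is that the shrink path frees the tail entries first, then
tries to allocate a smaller array, and simply keeps the old oversized
array if that allocation fails, so it can always return success.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lru_array {
	void **slots;	/* one pointer per per-memcg LRU, by index */
	int size;	/* number of usable slots */
};

/* Shrink @arr to @new_size slots; like the patch, this cannot fail. */
static void lru_array_shrink(struct lru_array *arr, int new_size)
{
	void **new_slots;
	int i;

	/* Free the entries that fall outside the new size. */
	for (i = new_size; i < arr->size; i++) {
		free(arr->slots[i]);
		arr->slots[i] = NULL;
	}

	new_slots = malloc(new_size * sizeof(void *));
	if (!new_slots) {
		/* Reuse the old array; we only waste the unused tail. */
		arr->size = new_size;
		return;
	}

	memcpy(new_slots, arr->slots, new_size * sizeof(void *));
	free(arr->slots);
	arr->slots = new_slots;
	arr->size = new_size;
}

int main(void)
{
	struct lru_array arr = { .size = 8 };
	int i;

	arr.slots = calloc(arr.size, sizeof(void *));
	if (!arr.slots)
		return 1;
	for (i = 0; i < arr.size; i++)
		arr.slots[i] = malloc(16);

	lru_array_shrink(&arr, 4);	/* always succeeds */
	printf("size is now %d\n", arr.size);

	for (i = 0; i < arr.size; i++)
		free(arr.slots[i]);
	free(arr.slots);
	return 0;
}

In the kernel version shown below, publishing the new array additionally
requires rcu_assign_pointer() under the IRQ-safe nlru->lock, and the old
array must be freed via kvfree_rcu() so concurrent lockless readers stay
safe.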

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/list_lru.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 49 insertions(+), 4 deletions(-)

Patch

diff --git a/mm/list_lru.c b/mm/list_lru.c
index d78dba5a6dab..3ee5239922c9 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -383,13 +383,11 @@  static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
 	kvfree(memcg_lrus);
 }
 
-static int memcg_update_list_lru_node(struct list_lru_node *nlru,
-				      int old_size, int new_size)
+static int memcg_list_lru_node_inc(struct list_lru_node *nlru,
+				   int old_size, int new_size)
 {
 	struct list_lru_memcg *old, *new;
 
-	BUG_ON(old_size > new_size);
-
 	old = rcu_dereference_protected(nlru->memcg_lrus,
 					lockdep_is_held(&list_lrus_mutex));
 	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
@@ -418,11 +416,58 @@  static int memcg_update_list_lru_node(struct list_lru_node *nlru,
 	return 0;
 }
 
+/* This function always returns 0. */
+static int memcg_list_lru_node_dec(struct list_lru_node *nlru,
+				   int old_size, int new_size)
+{
+	struct list_lru_memcg *old, *new;
+
+	old = rcu_dereference_protected(nlru->memcg_lrus,
+					lockdep_is_held(&list_lrus_mutex));
+	__memcg_destroy_list_lru_node(old, new_size, old_size);
+
+	/* Reuse the old array if the allocation fails here. */
+	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
+	if (!new)
+		return 0;
+
+	memcpy(&new->lru, &old->lru, new_size * sizeof(void *));
+
+	/*
+	 * The locking below allows readers that hold nlru->lock to avoid
+	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
+	 *
+	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
+	 * we have to use IRQ-safe primitives here to avoid deadlock.
+	 */
+	spin_lock_irq(&nlru->lock);
+	rcu_assign_pointer(nlru->memcg_lrus, new);
+	spin_unlock_irq(&nlru->lock);
+
+	kvfree_rcu(old, rcu);
+	return 0;
+}
+
+static int memcg_update_list_lru_node(struct list_lru_node *nlru,
+				      int old_size, int new_size)
+{
+	if (new_size > old_size)
+		return memcg_list_lru_node_inc(nlru, old_size, new_size);
+	else if (new_size < old_size)
+		return memcg_list_lru_node_dec(nlru, old_size, new_size);
+
+	return 0;
+}
+
 static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
 					      int old_size, int new_size)
 {
 	struct list_lru_memcg *memcg_lrus;
 
+	/* Nothing to do for the shrinking case. */
+	if (old_size >= new_size)
+		return;
+
 	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
 					       lockdep_is_held(&list_lrus_mutex));
 	/* do not bother shrinking the array back to the old size, because we