[RFC,9/9] bpf: support recharge for hash map

Message ID 20220308131056.6732-10-laoar.shao@gmail.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf, mm: recharge bpf memory from offline memcg

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next fail VM_Test
netdev/tree_selection success Guessing tree name failed - patch did not apply, async

Commit Message

Yafang Shao March 8, 2022, 1:10 p.m. UTC
This patch supports recharge for the hash map. Since we already know how
the hash map is allocated and freed, we also know how to charge and
uncharge it: first uncharge it from the old memcg, then charge it to the
current memcg. The old memcg must be an offline memcg.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 kernel/bpf/hashtab.c | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
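
Note: the recharge flow is exposed through the map_recharge_memcg
callback in struct bpf_map_ops, which this patch wires up for all the
hash map variants below. A minimal caller-side sketch of how the
generic map layer could dispatch to it (the wrapper name and entry
point here are hypothetical; the actual hook is introduced earlier in
this series):

#include <linux/bpf.h>

/* Hypothetical dispatch sketch: forward the recharge request to the
 * map-type-specific callback when one is provided. Returns true if
 * the map's memory was moved to the current task's memcg.
 */
static bool bpf_map_recharge_memcg(struct bpf_map *map)
{
	if (!map->ops->map_recharge_memcg)
		return false;	/* map type does not support recharge */

	return map->ops->map_recharge_memcg(map);
}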

Patch

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 6587796..4d103f1 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -10,6 +10,7 @@ 
 #include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include <linux/rcupdate_trace.h>
+#include <linux/memcontrol.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 #include "map_in_map.h"
@@ -1466,6 +1467,36 @@  static void htab_map_free(struct bpf_map *map)
 	kfree(htab);
 }
 
+static bool htab_map_recharge_memcg(struct bpf_map *map)
+{
+	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+	struct mem_cgroup *old = map->memcg;
+	int i;
+
+	if (!old)
+		return false;
+
+	/* Only process offline memcg */
+	if (old == root_mem_cgroup || old->kmemcg_id >= 0)
+		return false;
+
+	bpf_map_release_memcg(map);
+	kcharge(htab, false);
+	kvcharge(htab->buckets, false);
+	charge_percpu(htab->extra_elems, false);
+	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
+		charge_percpu(htab->map_locked[i], false);
+
+	kcharge(htab, true);
+	kvcharge(htab->buckets, true);
+	charge_percpu(htab->extra_elems, true);
+	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
+		charge_percpu(htab->map_locked[i], true);
+	bpf_map_save_memcg(map);
+
+	return true;
+}
+
 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
 				   struct seq_file *m)
 {
@@ -2111,6 +2142,7 @@  static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_f
 	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
+	.map_recharge_memcg = htab_map_recharge_memcg,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_release_uref = htab_map_free_timers,
 	.map_lookup_elem = htab_map_lookup_elem,
@@ -2133,6 +2165,7 @@  static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_f
 	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
+	.map_recharge_memcg = htab_map_recharge_memcg,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_release_uref = htab_map_free_timers,
 	.map_lookup_elem = htab_lru_map_lookup_elem,
@@ -2258,6 +2291,7 @@  static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
 	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
+	.map_recharge_memcg = htab_map_recharge_memcg,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_percpu_map_lookup_elem,
 	.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
@@ -2278,6 +2312,7 @@  static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
 	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
+	.map_recharge_memcg = htab_map_recharge_memcg,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
 	.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
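
Note: the uncharge/charge pairs in htab_map_recharge_memcg() mirror the
allocations made in htab_map_alloc(): the htab struct itself (kcharge),
the bucket array (kvcharge), and the extra_elems and map_locked per-CPU
areas (charge_percpu). The offline check treats a memcg with a valid
(non-negative) kmemcg_id as still online, and never touches the root
memcg, which is never offlined. Below is a minimal sketch of how a
kcharge()-style helper could move one slab object's charge using the
upstream obj_cgroup API; all names are hypothetical, the caller is
assumed to have resolved the old owner's obj_cgroup (e.g. from the
map's saved memcg), and the series' real helpers must additionally
retarget the per-object obj_cgroup pointer so a later kfree() uncharges
the new owner, a step this sketch omits:

#include <linux/memcontrol.h>
#include <linux/slab.h>

/* Illustrative only (requires CONFIG_MEMCG_KMEM): drop the kmem
 * charge of one slab object from its old obj_cgroup and account it
 * to the memcg of the calling task instead.
 */
static int kcharge_move_sketch(const void *ptr, struct obj_cgroup *old_objcg)
{
	struct obj_cgroup *new_objcg = get_obj_cgroup_from_current();
	size_t size = ksize(ptr);	/* allocated size of the object */
	int err = 0;

	if (old_objcg)
		obj_cgroup_uncharge(old_objcg, size);	/* leave the old memcg */

	if (new_objcg) {
		/* may fail under memory pressure; a real helper has to
		 * handle or tolerate that
		 */
		err = obj_cgroup_charge(new_objcg, GFP_KERNEL, size);
		obj_cgroup_put(new_objcg);
	}

	return err;
}

The uncharge-then-charge ordering matches the commit message: the
memory briefly leaves the offline memcg's accounting before it is
charged to the current one.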