
[RFC,bpf-next,3/9] mm: percpu: Account active vm for percpu

Message ID 20221212003711.24977-4-laoar.shao@gmail.com (mailing list archive)
State New
Series mm, bpf: Add BPF into /proc/meminfo

Commit Message

Yafang Shao Dec. 12, 2022, 12:37 a.m. UTC
Account a percpu allocation when an active vm item is set. The percpu
memory is accounted at percpu alloc time and unaccounted at percpu free
time. To record which parts of a percpu chunk are tracked by active vm,
extra memory is allocated for each such chunk to hold the active vm
information; this extra memory is freed together with the chunk.
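
This patch relies on active_vm_enabled(), active_vm_item(),
active_vm_item_add() and active_vm_item_sub() from active_vm.h, which is
introduced earlier in this series. Below is a minimal userspace model of
how that interface is assumed to behave: a per-item byte counter that
callers bump on allocation and drop on free. The item id, the counter
layout and active_vm_item_set() are illustrative assumptions, not the
actual kernel implementation.

/*
 * Illustrative model of the active_vm interface used by this patch (the
 * real definitions live in active_vm.h from earlier in this series).
 * The item id, the per-item counter array and active_vm_item_set() are
 * assumptions made for the sake of a self-contained example.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { ACTIVE_VM_BPF = 1, NR_ACTIVE_VM_ITEM };		/* hypothetical item ids */

static _Atomic long active_vm_stat[NR_ACTIVE_VM_ITEM];	/* bytes accounted per item */
static __thread int current_item;			/* item set by the current task */

static bool active_vm_enabled(void) { return true; }	/* a static key in the kernel */
static int active_vm_item(void) { return current_item; }
static void active_vm_item_set(int item) { current_item = item; }

static void active_vm_item_add(int item, long size)
{
	atomic_fetch_add(&active_vm_stat[item], size);
}

static void active_vm_item_sub(int item, long size)
{
	atomic_fetch_sub(&active_vm_stat[item], size);
}

int main(void)
{
	active_vm_item_set(ACTIVE_VM_BPF);		/* caller marks its allocations */
	active_vm_item_add(active_vm_item(), 4096);	/* pcpu_alloc() side */
	active_vm_item_sub(ACTIVE_VM_BPF, 4096);	/* free_percpu() side */
	printf("bpf active vm: %ld bytes\n", (long)active_vm_stat[ACTIVE_VM_BPF]);
	return 0;
}

In the kernel the per-item totals are what this series exposes through
/proc/meminfo; the flat counter array above only stands in for that.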

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 mm/percpu-internal.h |  3 +++
 mm/percpu.c          | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+)
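
The new chunk->active_vm vector has one int per minimum allocation unit
(pcpu_chunk_map_bits() entries) and is indexed by an allocation's
starting offset, so free_percpu(), which only knows the offset, can
recover which item to unaccount. Here is a standalone sketch of that
bookkeeping, assuming the usual 4-byte minimum percpu allocation unit;
the struct and helper names are illustrative, not kernel code.

/*
 * Sketch of the per-chunk bookkeeping added by this patch: one int per
 * minimum allocation unit, indexed by the allocation's starting offset.
 * The standalone struct and the 1024-slot size are illustrative only.
 */
#include <assert.h>
#include <stdlib.h>

#define PCPU_MIN_ALLOC_SHIFT	2	/* 4-byte minimum allocation unit */

struct chunk_model {
	int *active_vm;		/* zero-filled, one slot per unit, like chunk->active_vm */
};

/* pcpu_alloc() side: remember which item owns the allocation at @off. */
static void record_item(struct chunk_model *c, int off, int item)
{
	c->active_vm[off >> PCPU_MIN_ALLOC_SHIFT] = item;
}

/* free_percpu() side: only the offset is known, so look the item up and clear it. */
static int recall_item(struct chunk_model *c, int off)
{
	int idx = off >> PCPU_MIN_ALLOC_SHIFT;
	int item = c->active_vm[idx];

	c->active_vm[idx] = 0;
	return item;
}

int main(void)
{
	struct chunk_model c = { .active_vm = calloc(1024, sizeof(int)) };

	record_item(&c, 64, 1);			/* allocation at offset 64, item 1 */
	assert(recall_item(&c, 64) == 1);	/* free path recovers the item... */
	assert(c.active_vm[64 >> PCPU_MIN_ALLOC_SHIFT] == 0);	/* ...and clears the slot */
	free(c.active_vm);
	return 0;
}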

Patch

diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index 70b1ea23f4d2..f56e236a2cf3 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -63,6 +63,9 @@  struct pcpu_chunk {
 	int			nr_pages;	/* # of pages served by this chunk */
 	int			nr_populated;	/* # of populated pages */
 	int                     nr_empty_pop_pages; /* # of empty populated pages */
+#ifdef CONFIG_ACTIVE_VM
+	int			*active_vm;	/* vector of active vm items */
+#endif
 	unsigned long		populated[];	/* populated bitmap */
 };
 
diff --git a/mm/percpu.c b/mm/percpu.c
index 27697b2429c2..05858981ed4a 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -98,6 +98,7 @@ 
 #include <trace/events/percpu.h>
 
 #include "percpu-internal.h"
+#include "active_vm.h"
 
 /*
  * The slots are sorted by the size of the biggest continuous free area.
@@ -1398,6 +1399,9 @@  static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
 #ifdef CONFIG_MEMCG_KMEM
 	/* first chunk is free to use */
 	chunk->obj_cgroups = NULL;
+#endif
+#ifdef CONFIG_ACTIVE_VM
+	chunk->active_vm = NULL;
 #endif
 	pcpu_init_md_blocks(chunk);
 
@@ -1476,6 +1480,14 @@  static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 	}
 #endif
 
+#ifdef CONFIG_ACTIVE_VM
+	if (active_vm_enabled()) {
+		chunk->active_vm = pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
+								sizeof(int), gfp);
+		if (!chunk->active_vm)
+			goto active_vm_fail;
+	}
+#endif
 	pcpu_init_md_blocks(chunk);
 
 	/* init metadata */
@@ -1483,6 +1495,12 @@  static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 
 	return chunk;
 
+#ifdef CONFIG_ACTIVE_VM
+active_vm_fail:
+#ifdef CONFIG_MEMCG_KMEM
+	pcpu_mem_free(chunk->obj_cgroups);
+#endif
+#endif
 #ifdef CONFIG_MEMCG_KMEM
 objcg_fail:
 	pcpu_mem_free(chunk->md_blocks);
@@ -1501,6 +1519,9 @@  static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
 	if (!chunk)
 		return;
+#ifdef CONFIG_ACTIVE_VM
+	pcpu_mem_free(chunk->active_vm);
+#endif
 #ifdef CONFIG_MEMCG_KMEM
 	pcpu_mem_free(chunk->obj_cgroups);
 #endif
@@ -1890,6 +1911,17 @@  static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
+#ifdef CONFIG_ACTIVE_VM
+	if (active_vm_enabled() && chunk->active_vm && (gfp & __GFP_ACCOUNT)) {
+		int item = active_vm_item();
+
+		if (item > 0) {
+			chunk->active_vm[off >> PCPU_MIN_ALLOC_SHIFT] = item;
+			active_vm_item_add(item, size);
+		}
+	}
+#endif
+
 	return ptr;
 
 fail_unlock:
@@ -2283,6 +2315,17 @@  void free_percpu(void __percpu *ptr)
 
 	pcpu_memcg_free_hook(chunk, off, size);
 
+#ifdef CONFIG_ACTIVE_VM
+	if (active_vm_enabled() && chunk->active_vm) {
+		int item = chunk->active_vm[off >> PCPU_MIN_ALLOC_SHIFT];
+
+		if (item > 0) {
+			active_vm_item_sub(item, size);
+			chunk->active_vm[off >> PCPU_MIN_ALLOC_SHIFT] = 0;
+		}
+	}
+#endif
+
 	/*
 	 * If there are more than one fully free chunks, wake up grim reaper.
 	 * If the chunk is isolated, it may be in the process of being