[RFC,5/9] mm: add method to charge percpu address

Message ID: 20220308131056.6732-6-laoar.shao@gmail.com (mailing list archive)
State: RFC
Series: bpf, mm: recharge bpf memory from offline memcg

Checks

Context                    Check     Description
bpf/vmtest-bpf-next-PR     fail      PR summary
bpf/vmtest-bpf-next        fail      VM_Test
netdev/tree_selection      success   Guessing tree name failed - patch did not apply, async

Commit Message

Yafang Shao March 8, 2022, 1:10 p.m. UTC
This patch adds a method to charge or uncharge a percpu address.
It is similar to free_percpu() except that it does not touch the
related pages; it performs the memcg accounting only. A usage
sketch follows the diffstat below.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 include/linux/percpu.h |  1 +
 mm/percpu.c            | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)
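
As a rough illustration of the intended call pattern, here is a minimal,
hypothetical caller. It is not part of the posted patch: the function name
recharge_percpu_sketch() is invented for illustration, and the reassignment
of chunk->obj_cgroups[] to a new memcg is assumed to be handled by other
patches in this series. Only charge_percpu() comes from this patch.

#include <linux/percpu.h>

/*
 * Hypothetical sketch, not part of this patch: drop the accounting of a
 * live percpu object against the objcg currently recorded in its chunk,
 * then account it again once the objcg has been reassigned elsewhere.
 */
static void recharge_percpu_sketch(void __percpu *pdata)
{
	/* charge_percpu() tolerates NULL itself; this is belt and braces. */
	if (!pdata)
		return;

	/* Uncharge the object from the objcg currently on record. */
	charge_percpu(pdata, false);

	/*
	 * Reassignment of chunk->obj_cgroups[] to the new memcg is
	 * assumed to happen here, elsewhere in this series.
	 */

	/* Re-account the same object against the (new) objcg. */
	charge_percpu(pdata, true);
}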

Patch

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index f1ec5ad..1a65221 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -128,6 +128,7 @@  extern int __init pcpu_page_first_chunk(size_t reserved_size,
 extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
 extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
 extern void free_percpu(void __percpu *__pdata);
+void charge_percpu(void __percpu *__pdata, bool charge);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #define alloc_percpu_gfp(type, gfp)					\
diff --git a/mm/percpu.c b/mm/percpu.c
index ea28db2..22fc0ff 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2309,6 +2309,56 @@  void free_percpu(void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+void charge_percpu(void __percpu *ptr, bool charge)
+{
+	int bit_off, off, bits, size, end;
+	struct obj_cgroup *objcg;
+	struct pcpu_chunk *chunk;
+	unsigned long flags;
+	void *addr;
+
+	WARN_ON(!in_task());
+
+	if (!ptr)
+		return;
+
+	addr = __pcpu_ptr_to_addr(ptr);
+	spin_lock_irqsave(&pcpu_lock, flags);
+	chunk = pcpu_chunk_addr_search(addr);
+	off = addr - chunk->base_addr;
+	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
+	if (!objcg) {
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+		return;
+	}
+
+	bit_off = off / PCPU_MIN_ALLOC_SIZE;
+	/* find end index */
+	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
+			bit_off + 1);
+	bits = end - bit_off;
+	size = bits * PCPU_MIN_ALLOC_SIZE;
+
+	if (charge) {
+		obj_cgroup_get(objcg);
+		obj_cgroup_charge(objcg, GFP_KERNEL, size * num_possible_cpus());
+		rcu_read_lock();
+		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
+			(size * num_possible_cpus()));
+		rcu_read_unlock();
+	} else {
+		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
+		rcu_read_lock();
+		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
+			-(size * num_possible_cpus()));
+		rcu_read_unlock();
+		obj_cgroup_put(objcg);
+	}
+
+	spin_unlock_irqrestore(&pcpu_lock, flags);
+}
+EXPORT_SYMBOL(charge_percpu);
+
 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 #ifdef CONFIG_SMP