
[bpf-next,2/7] mm: percpu: introduce percpu_size()

Message ID: 20230202014158.19616-3-laoar.shao@gmail.com
State: New
Series: bpf, mm: bpf memory usage

Commit Message

Yafang Shao Feb. 2, 2023, 1:41 a.m. UTC
Introduce a new helper, percpu_size(), to report the full size of the
underlying allocation of a percpu address.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
---
 include/linux/percpu.h |  1 +
 mm/percpu.c            | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+)
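
For context, here is a minimal usage sketch of the new helper. This is a
hypothetical caller, not part of the patch; report_footprint() is an
invented name:

#include <linux/percpu.h>

static size_t report_footprint(void)
{
	int __percpu *cnt;
	size_t sz;

	cnt = alloc_percpu(int);
	if (!cnt)
		return 0;

	/*
	 * Full size of the underlying allocation, across all copies of
	 * the area, not just sizeof(int) on one CPU.
	 */
	sz = percpu_size(cnt);

	free_percpu(cnt);
	return sz;
}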

Comments

Christoph Lameter Feb. 2, 2023, 2:32 p.m. UTC | #1
On Thu, 2 Feb 2023, Yafang Shao wrote:

> +	bits = end - bit_off;
> +	size = bits * PCPU_MIN_ALLOC_SIZE;
> +
> +	return pcpu_obj_full_size(size);

Don't you have to multiply by the number of online CPUs? The per-cpu
areas are duplicated for those.
Yafang Shao Feb. 2, 2023, 3:01 p.m. UTC | #2
On Thu, Feb 2, 2023 at 10:32 PM Christoph Lameter <cl@gentwo.de> wrote:
>
> On Thu, 2 Feb 2023, Yafang Shao wrote:
>
> > +     bits = end - bit_off;
> > +     size = bits * PCPU_MIN_ALLOC_SIZE;
> > +
> > +     return pcpu_obj_full_size(size);
>
> Don't you have to multiply by the number of online CPUs? The per-cpu
> areas are duplicated for those.

It is already multiplied by the number of possible CPUs in
pcpu_obj_full_size().
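
For reference, the multiplication Christoph asked about lives in
pcpu_obj_full_size() in mm/percpu-internal.h. Reproduced here from memory
of the tree this series is based on, so treat the details as approximate:

static inline size_t pcpu_obj_full_size(size_t size)
{
	size_t extra_size = 0;

#ifdef CONFIG_MEMCG_KMEM
	/* memcg accounting keeps one obj_cgroup pointer per object */
	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif

	/* one copy of the area exists for every possible CPU */
	return size * num_possible_cpus() + extra_size;
}

Note that it multiplies by num_possible_cpus() rather than the online
count, so the reported size is an upper bound when some CPUs are offline.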

Patch

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1338ea2..7be4234 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -137,5 +137,6 @@  extern int __init pcpu_page_first_chunk(size_t reserved_size,
 						__alignof__(type))
 
 extern unsigned long pcpu_nr_pages(void);
+extern size_t percpu_size(void __percpu *ptr);
 
 #endif /* __LINUX_PERCPU_H */
diff --git a/mm/percpu.c b/mm/percpu.c
index acd78da..5580688 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2302,6 +2302,41 @@  void free_percpu(void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+/**
+ * percpu_size - report full size of underlying allocation of percpu addr
+ * @ptr: pointer to percpu area
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+size_t percpu_size(void __percpu *ptr)
+{
+	int bit_off, bits, end, off, size;
+	struct pcpu_chunk *chunk;
+	unsigned long flags;
+	void *addr;
+
+	if (!ptr)
+		return 0;
+
+	addr = __pcpu_ptr_to_addr(ptr);
+
+	spin_lock_irqsave(&pcpu_lock, flags);
+	chunk = pcpu_chunk_addr_search(addr);
+	off = addr - chunk->base_addr;
+	bit_off = off / PCPU_MIN_ALLOC_SIZE;
+
+	/* find end index */
+	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
+			    bit_off + 1);
+	spin_unlock_irqrestore(&pcpu_lock, flags);
+
+	bits = end - bit_off;
+	size = bits * PCPU_MIN_ALLOC_SIZE;
+
+	return pcpu_obj_full_size(size);
+}
+
 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 #ifdef CONFIG_SMP