diff mbox series

[RFC/PATCH,bpf-next,1/3] bpf: Add kmem_cache iterator

Message ID 20240927184133.968283-2-namhyung@kernel.org (mailing list archive)
State New
Headers show
Series bpf: Add kmem_cache iterator and kfunc (v2) | expand

Commit Message

Namhyung Kim Sept. 27, 2024, 6:41 p.m. UTC
The new "kmem_cache" iterator will traverse the list of slab caches
and call attached BPF programs for each entry.  The BPF program should
check whether the argument (ctx.s) is NULL before using it.

The iteration will be done with slab_mutex held but it'd break and
return to user if the BPF program emits data to seq buffer more than
the buffer size given by the user.  IOW the whole iteration would be
protected by the slab_mutex as long as it won't emit anything.

It includes the internal "mm/slab.h" header to access kmem_cache,
slab_caches and slab_mutex.  Hope it's ok to mm folks.

Acked-by: Roman Gushchin <roman.gushchin@linux.dev> (mm/*)
Acked-by: Vlastimil Babka <vbabka@suse.cz> #mm/slab
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 include/linux/btf_ids.h      |   1 +
 kernel/bpf/Makefile          |   1 +
 kernel/bpf/kmem_cache_iter.c | 131 +++++++++++++++++++++++++++++++++++
 3 files changed, 133 insertions(+)
 create mode 100644 kernel/bpf/kmem_cache_iter.c

Comments

Alexei Starovoitov Sept. 29, 2024, 5:04 p.m. UTC | #1
On Fri, Sep 27, 2024 at 11:41 AM Namhyung Kim <namhyung@kernel.org> wrote:
> +static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
> +{
> +       loff_t cnt = 0;
> +       struct kmem_cache *s = NULL;
> +
> +       mutex_lock(&slab_mutex);

It would be better to find a way to iterate slabs without holding
the mutex for the duration of the loop.
Maybe use refcnt to hold the kmem_cache while bpf prog is looking at it?
Namhyung Kim Sept. 30, 2024, 2:08 a.m. UTC | #2
On Sun, Sep 29, 2024 at 10:04:00AM -0700, Alexei Starovoitov wrote:
> On Fri, Sep 27, 2024 at 11:41 AM Namhyung Kim <namhyung@kernel.org> wrote:
> > +static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
> > +{
> > +       loff_t cnt = 0;
> > +       struct kmem_cache *s = NULL;
> > +
> > +       mutex_lock(&slab_mutex);
> 
> It would be better to find a way to iterate slabs without holding
> the mutex for the duration of the loop.
> Maybe use refcnt to hold the kmem_cache while bpf prog is looking at it?

Do you mean that you want to not hold slab_mutex while BPF program is
running?  Maybe we can allocate an array of pointers to the slab cache
(with refcounts) at the beginning and iterate them instead.  And call
kmem_cache_destroy() for each entry at the end.  Is that ok with you?

Thanks,
Namhyung
Alexei Starovoitov Oct. 1, 2024, 6:23 p.m. UTC | #3
On Sun, Sep 29, 2024 at 7:08 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> On Sun, Sep 29, 2024 at 10:04:00AM -0700, Alexei Starovoitov wrote:
> > On Fri, Sep 27, 2024 at 11:41 AM Namhyung Kim <namhyung@kernel.org> wrote:
> > > +static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
> > > +{
> > > +       loff_t cnt = 0;
> > > +       struct kmem_cache *s = NULL;
> > > +
> > > +       mutex_lock(&slab_mutex);
> >
> > It would be better to find a way to iterate slabs without holding
> > the mutex for the duration of the loop.
> > Maybe use refcnt to hold the kmem_cache while bpf prog is looking at it?
>
> Do you mean that you want to not hold slab_mutex while BPF program is
> running?

yes.

> Maybe we can allocate an array of pointers to the slab cache
> (with refcounts) at the beginning and iterate them instead.  And call
> kmem_cache_destroy() for each entry at the end.  Is that ok with you?

That doesn't sound efficient.
Just grab a refcnt on kmem_cache before running the prog ?
Drop refcnt, and grab a mutex again to do a next step.
kmem_cache_iter_seq_next() will be running with mutex held, of course.
diff mbox series

Patch

diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index c0e3e1426a82f5c4..139bdececdcfaefb 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -283,5 +283,6 @@  extern u32 btf_tracing_ids[];
 extern u32 bpf_cgroup_btf_id[];
 extern u32 bpf_local_storage_map_btf_id[];
 extern u32 btf_bpf_map_id[];
+extern u32 bpf_kmem_cache_btf_id[];
 
 #endif
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 9b9c151b5c826b31..105328f0b9c04e37 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -52,3 +52,4 @@  obj-$(CONFIG_BPF_PRELOAD) += preload/
 obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
 obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o
+obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o
diff --git a/kernel/bpf/kmem_cache_iter.c b/kernel/bpf/kmem_cache_iter.c
new file mode 100644
index 0000000000000000..5f7436b52f2e6b06
--- /dev/null
+++ b/kernel/bpf/kmem_cache_iter.c
@@ -0,0 +1,131 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Google */
+#include <linux/bpf.h>
+#include <linux/btf_ids.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+
+#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */
+
+struct bpf_iter__kmem_cache {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct kmem_cache *, s);
+};
+
+static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	loff_t cnt = 0;
+	struct kmem_cache *s = NULL;
+
+	mutex_lock(&slab_mutex);
+
+	/*
+	 * Find an entry at the given position in the slab_caches list instead
+	 * of keeping a reference (of the last visited entry, if any) out of
+	 * slab_mutex. It might miss something if one is deleted in the middle
+	 * while it releases the lock.  But it should be rare and there's not
+	 * much we can do about it.
+	 */
+	list_for_each_entry(s, &slab_caches, list) {
+		if (cnt == *pos)
+			break;
+
+		cnt++;
+	}
+
+	if (cnt != *pos)
+		return NULL;
+
+	++*pos;
+	return s;
+}
+
+static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_iter__kmem_cache ctx = {
+		.meta = &meta,
+		.s = v,
+	};
+	struct bpf_prog *prog;
+
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, true);
+	if (prog)
+		bpf_iter_run_prog(prog, &ctx);
+
+	mutex_unlock(&slab_mutex);
+}
+
+static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct kmem_cache *s = v;
+
+	++*pos;
+
+	if (list_last_entry(&slab_caches, struct kmem_cache, list) == s)
+		return NULL;
+
+	return list_next_entry(s, list);
+}
+
+static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
+{
+	struct bpf_iter_meta meta;
+	struct bpf_iter__kmem_cache ctx = {
+		.meta = &meta,
+		.s = v,
+	};
+	struct bpf_prog *prog;
+	int ret = 0;
+
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, false);
+	if (prog)
+		ret = bpf_iter_run_prog(prog, &ctx);
+
+	return ret;
+}
+
+static const struct seq_operations kmem_cache_iter_seq_ops = {
+	.start  = kmem_cache_iter_seq_start,
+	.next   = kmem_cache_iter_seq_next,
+	.stop   = kmem_cache_iter_seq_stop,
+	.show   = kmem_cache_iter_seq_show,
+};
+
+BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)
+
+static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
+	.seq_ops		= &kmem_cache_iter_seq_ops,
+};
+
+static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
+					    struct seq_file *seq)
+{
+	seq_puts(seq, "kmem_cache iter\n");
+}
+
+DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
+		     struct kmem_cache *s)
+
+static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
+	.target			= "kmem_cache",
+	.feature		= BPF_ITER_RESCHED,
+	.show_fdinfo		= bpf_iter_kmem_cache_show_fdinfo,
+	.ctx_arg_info_size	= 1,
+	.ctx_arg_info		= {
+		{ offsetof(struct bpf_iter__kmem_cache, s),
+		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
+	},
+	.seq_info		= &kmem_cache_iter_seq_info,
+};
+
+static int __init bpf_kmem_cache_iter_init(void)
+{
+	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
+	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
+}
+
+late_initcall(bpf_kmem_cache_iter_init);