[RFC,bpf-next,04/11] bpf: allow specifying inlinable kfuncs in modules

Message ID 20241107175040.1659341-5-eddyz87@gmail.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf: inlinable kfuncs for BPF

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-11 pending Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 pending Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 pending Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-15 pending Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 pending Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 226 this patch: 226
netdev/build_tools success Errors and warnings before: 0 (+0) this patch: 0 (+0)
netdev/cc_maintainers warning 6 maintainers not CCed: john.fastabend@gmail.com kpsingh@kernel.org song@kernel.org jolsa@kernel.org haoluo@google.com sdf@fomichev.me
netdev/build_clang fail Errors and warnings before: 6 this patch: 6
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 6875 this patch: 6857
netdev/checkpatch fail CHECK: Please use a blank line after function/struct/union/enum declarations ERROR: code indent should use tabs where possible WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 6 this patch: 6
netdev/source_inline success Was 0 now: 0

Commit Message

Eduard Zingerman Nov. 7, 2024, 5:50 p.m. UTC
Instead of keeping inlinable kfuncs in a single table,
track them as a list of tables, one table per module providing
such kfuncs. Protect the list with an rwlock to safely deal with
module load/unload.

Provide two interface functions for use in modules:
- int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size,
                                    struct module *module);
- void bpf_unregister_inlinable_kfuncs(struct module *module);

The assumption is that each module providing inlinable kfuncs
embeds the BPF ELF in the same way verifier.c does and calls
bpf_register_inlinable_kfuncs() at load time.
bpf_unregister_inlinable_kfuncs() is supposed to be called before
module unload.
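
For illustration, a minimal sketch of the expected module-side glue,
assuming hypothetical symbols my_kfuncs_data / my_kfuncs_data_end that
bracket the embedded BPF ELF (mirroring how verifier.c links
inlinable_kfuncs_data):

  /* Hypothetical module glue; symbol and function names are
   * illustrative, not part of this series.
   */
  #include <linux/module.h>
  #include <linux/bpf.h>

  extern char my_kfuncs_data;
  extern char my_kfuncs_data_end;

  static int __init my_mod_init(void)
  {
          /* Parse the embedded ELF, add this module's table to the list. */
          return bpf_register_inlinable_kfuncs(&my_kfuncs_data,
                                               &my_kfuncs_data_end - &my_kfuncs_data,
                                               THIS_MODULE);
  }

  static void __exit my_mod_exit(void)
  {
          /* Remove the table before the module (and its BTF) goes away. */
          bpf_unregister_inlinable_kfuncs(THIS_MODULE);
  }

  module_init(my_mod_init);
  module_exit(my_mod_exit);
  MODULE_LICENSE("GPL");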

Each table holds a pointer to a btf object, but the refcount of the
btf object is not held by the table. Lifetime management of the
module's btf object instead relies on (see the sketch after this
list):
- correct usage of bpf_register_inlinable_kfuncs /
  bpf_unregister_inlinable_kfuncs to insert/remove tables into/from
  the list, so that tables are members of the list only while the
  module is loaded;
- add_kfunc_call(), so that kfuncs referenced from inlined bodies
  are not unloaded once an inlinable kfunc is inlined.
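
A condensed version, paraphrasing the register and unregister paths
from the patch below (annotated pseudocode, not verbatim):

  /* load:  bpf_register_inlinable_kfuncs()
   *   btf = btf_get_module_btf(module);          // takes a btf reference
   *   block->btf = btf;                          // raw pointer, no ref kept
   *   list_add(&block->list, &inlinable_kfuncs); // under write_lock
   *   btf_put(btf);                              // drop the temporary ref
   *
   * unload: bpf_unregister_inlinable_kfuncs(), before module unload
   *   list_del(&block->list);                    // under write_lock
   *   kvfree(block);
   *
   * Between list_add() and list_del() the module is loaded, so
   * block->btf stays valid; outside that window the block is not
   * reachable from the list.
   */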

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 include/linux/bpf.h   |   3 +
 include/linux/btf.h   |   7 +++
 kernel/bpf/btf.c      |   2 +-
 kernel/bpf/verifier.c | 129 +++++++++++++++++++++++++++++++++++-------
 4 files changed, 119 insertions(+), 22 deletions(-)

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1b84613b10ac..2bcc9161687b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3501,4 +3501,7 @@  static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog)
 	       prog->expected_attach_type == BPF_TRACE_RAW_TP;
 }
 
+void bpf_unregister_inlinable_kfuncs(struct module *module);
+int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size, struct module *module);
+
 #endif /* _LINUX_BPF_H */
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 4214e76c9168..9e8b27493139 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -582,6 +582,7 @@  int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty
 bool btf_types_are_same(const struct btf *btf1, u32 id1,
 			const struct btf *btf2, u32 id2);
 int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
+struct btf *btf_get_module_btf(const struct module *module);
 
 static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
 {
@@ -670,5 +671,11 @@  static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *fun
 {
 	return -EOPNOTSUPP;
 }
+static inline struct btf *btf_get_module_btf(const struct module *module)
+{
+	return NULL;
+}
+
 #endif
+
 #endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index e7a59e6462a9..49b25882fa58 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -8033,7 +8033,7 @@  struct module *btf_try_get_module(const struct btf *btf)
 /* Returns struct btf corresponding to the struct module.
  * This function can return NULL or ERR_PTR.
  */
-static struct btf *btf_get_module_btf(const struct module *module)
+struct btf *btf_get_module_btf(const struct module *module)
 {
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 	struct btf_module *btf_mod, *tmp;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fbf51147f319..b86308896358 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20534,7 +20534,20 @@  struct inlinable_kfunc {
 	u32 btf_id;
 };
 
-static struct inlinable_kfunc inlinable_kfuncs[16];
+/* Represents inlinable kfuncs provided by a module */
+struct inlinable_kfuncs_block {
+	struct list_head list;
+	/* Module or kernel BTF, refcount not taken. Relies on correct
+	 * calls to bpf_[un]register_inlinable_kfuncs to guarantee BTF
+	 * object lifetime.
+	 */
+	const struct btf *btf;
+	struct inlinable_kfunc funcs[16];
+};
+
+/* List of inlinable_kfuncs_block objects. */
+static struct list_head inlinable_kfuncs = LIST_HEAD_INIT(inlinable_kfuncs);
+static DEFINE_RWLOCK(inlinable_kfuncs_lock);
 
 static void *check_inlinable_kfuncs_ptr(struct blob *blob,
 				      void *ptr, u64 size, const char *context)
@@ -20710,8 +20723,10 @@  static int inlinable_kfuncs_parse_sections(struct blob *blob, struct sh_elf_sect
 			if (strcmp(name, ".text") == 0) {
 				text = shdr;
 				text_idx = i;
-			} else if (strcmp(name, ".BTF") == 0 || strcmp(name, ".BTF.ext") == 0) {
-				/* ignore BTF for now */
+			} else if (strcmp(name, ".BTF") == 0 ||
+				   strcmp(name, ".BTF.ext") == 0 ||
+				   strcmp(name, ".modinfo") == 0) {
+				/* ignore */
 				break;
 			} else {
 				printk("malformed inlinable kfuncs data: unexpected section #%u name ('%s')\n",
@@ -20841,27 +20856,35 @@  static int inlinable_kfuncs_apply_relocs(struct sh_elf_sections *s, struct btf *
  * Do some sanity checks for ELF data structures,
  * (but refrain from being overly paranoid, as this ELF is a part of kernel build).
  */
-static int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size)
+int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size, struct module *module)
 {
 	struct blob blob = { .mem = elf_bin, .size = size };
+	struct inlinable_kfuncs_block *block = NULL;
 	struct sh_elf_sections s;
 	struct btf *btf;
 	Elf_Sym *sym;
 	u32 i, idx;
 	int err;
 
-	btf = bpf_get_btf_vmlinux();
+	btf = btf_get_module_btf(module);
 	if (!btf)
 		return -EINVAL;
 
 	err = inlinable_kfuncs_parse_sections(&blob, &s);
 	if (err < 0)
-		return err;
+		goto err_out;
 
 	err = inlinable_kfuncs_apply_relocs(&s, btf);
 	if (err < 0)
-		return err;
+		goto err_out;
+
+	block = kvmalloc(sizeof(*block), GFP_KERNEL | __GFP_ZERO);
+	if (!block) {
+		err = -ENOMEM;
+		goto err_out;
+	}
 
+	block->btf = btf;
 	idx = 0;
 	for (sym = s.sym, i = 0; i < s.sym_cnt; i++, sym++) {
 		struct inlinable_kfunc *sh;
@@ -20879,11 +20902,13 @@  static int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size)
 		    sym->st_value % sizeof(struct bpf_insn) != 0 ||
 		    sym->st_name >= s.strings_sz) {
 			printk("malformed inlinable kfuncs data: bad symbol #%u\n", i);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_out;
 		}
-		if (idx == ARRAY_SIZE(inlinable_kfuncs) - 1) {
+		if (idx == ARRAY_SIZE(block->funcs) - 1) {
 			printk("malformed inlinable kfuncs data: too many helper functions\n");
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_out;
 		}
 		insn_num = sym->st_size / sizeof(struct bpf_insn);
 		insns = s.text + sym->st_value;
@@ -20891,9 +20916,10 @@  static int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size)
 		id = btf_find_by_name_kind(btf, name, BTF_KIND_FUNC);
 		if (id < 0) {
 			printk("can't add inlinable kfunc '%s': no btf_id\n", name);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_out;
 		}
-		sh = &inlinable_kfuncs[idx++];
+		sh = &block->funcs[idx++];
 		sh->insn_num = insn_num;
 		sh->insns = insns;
 		sh->name = name;
@@ -20902,25 +20928,86 @@  static int bpf_register_inlinable_kfuncs(void *elf_bin, u32 size)
 		       sh->name, sym->st_value, sh->insn_num, sh->btf_id);
 	}
 
+	write_lock(&inlinable_kfuncs_lock);
+	list_add(&block->list, &inlinable_kfuncs);
+	write_unlock(&inlinable_kfuncs_lock);
+	if (module)
+		btf_put(btf);
 	return 0;
+
+err_out:
+	kvfree(block);
+	if (module)
+		btf_put(btf);
+	return err;
+}
+EXPORT_SYMBOL_GPL(bpf_register_inlinable_kfuncs);
+
+void bpf_unregister_inlinable_kfuncs(struct module *module)
+{
+	struct inlinable_kfuncs_block *block;
+	struct list_head *pos;
+	struct btf *btf;
+
+	btf = btf_get_module_btf(module);
+	if (!btf)
+		return;
+
+	write_lock(&inlinable_kfuncs_lock);
+	list_for_each(pos, &inlinable_kfuncs) {
+		if (pos == &inlinable_kfuncs)
+			continue;
+		block = container_of(pos, typeof(*block), list);
+		if (block->btf != btf)
+			continue;
+		list_del(&block->list);
+		kvfree(block);
+		break;
+	}
+	write_unlock(&inlinable_kfuncs_lock);
+	btf_put(btf);
 }
+EXPORT_SYMBOL_GPL(bpf_unregister_inlinable_kfuncs);
 
 static int __init inlinable_kfuncs_init(void)
 {
 	return bpf_register_inlinable_kfuncs(&inlinable_kfuncs_data,
-					   &inlinable_kfuncs_data_end - &inlinable_kfuncs_data);
+					     &inlinable_kfuncs_data_end - &inlinable_kfuncs_data,
+					     NULL);
 }
 
 late_initcall(inlinable_kfuncs_init);
 
-static struct inlinable_kfunc *find_inlinable_kfunc(u32 btf_id)
+/* If a program refers to a kfunc from some module, that module is
+ * guaranteed not to be unloaded for the duration of program verification.
+ * Hence there is no need to protect the returned 'inlinable_kfunc'
+ * instance, as long as find_inlinable_kfunc() is called during program
+ * verification. However, holding read_lock(&inlinable_kfuncs_lock) for
+ * the duration of this function is still necessary, in case unrelated
+ * modules are loaded or unloaded concurrently with the
+ * find_inlinable_kfunc() call.
+ */
+static struct inlinable_kfunc *find_inlinable_kfunc(struct btf *btf, u32 btf_id)
 {
-	struct inlinable_kfunc *sh = inlinable_kfuncs;
+	struct inlinable_kfuncs_block *block;
+	struct inlinable_kfunc *sh;
+	struct list_head *pos;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(inlinable_kfuncs); ++i, ++sh)
-		if (sh->btf_id == btf_id)
+	read_lock(&inlinable_kfuncs_lock);
+	list_for_each(pos, &inlinable_kfuncs) {
+		block = container_of(pos, typeof(*block), list);
+		if (block->btf != btf)
+			continue;
+		sh = block->funcs;
+		for (i = 0; i < ARRAY_SIZE(block->funcs); ++i, ++sh) {
+			if (sh->btf_id != btf_id)
+				continue;
+			read_unlock(&inlinable_kfuncs_lock);
 			return sh;
+		}
+		break;
+	}
+	read_unlock(&inlinable_kfuncs_lock);
 	return NULL;
 }
 
@@ -20933,7 +21020,7 @@  static struct bpf_prog *inline_kfunc_call(struct bpf_verifier_env *env, struct b
 					  int insn_idx, int *cnt, s32 stack_base, u16 *stack_depth_extra)
 {
 	struct inlinable_kfunc_regs_usage regs_usage;
-	const struct bpf_kfunc_desc *desc;
+	struct bpf_kfunc_call_arg_meta meta;
 	struct bpf_prog *new_prog;
 	struct bpf_insn *insn_buf;
 	struct inlinable_kfunc *sh;
@@ -20943,10 +21030,10 @@  static struct bpf_prog *inline_kfunc_call(struct bpf_verifier_env *env, struct b
 	s16 stack_off;
 	u32 insn_num;
 
-	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
-	if (!desc || IS_ERR(desc))
+	err = fetch_kfunc_meta(env, insn, &meta, NULL);
+	if (err < 0)
 		return ERR_PTR(-EFAULT);
-	sh = find_inlinable_kfunc(desc->func_id);
+	sh = find_inlinable_kfunc(meta.btf, meta.func_id);
 	if (!sh)
 		return NULL;