
[11/18] bpf-preload: Store multiple bpf_preload_ops structures in a linked list

Message ID: 20220328175033.2437312-12-roberto.sassu@huawei.com
State: Changes Requested
Delegated to: BPF
Series: bpf: Secure and authenticated preloading of eBPF programs

Checks

Context                   Check     Description
bpf/vmtest-bpf-next-PR    fail      PR summary
netdev/tree_selection     success   Guessing tree name failed - patch did not apply, async

Commit Message

Roberto Sassu March 28, 2022, 5:50 p.m. UTC
In preparation for supporting the preloading of multiple eBPF programs,
define a linked list of bpf_preload_ops_item structures. The new structure
contains the object name of the eBPF program to preload (the object name
and the kernel module name should match, except for iterators_bpf, whose
kernel module is named bpf_preload).

The new structure also contains a pointer to the bpf_preload_ops structure
declared in the light skeleton, which provides the preload method of the
eBPF program.
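
For context, the bpf_preload_ops structure referenced here is not shown in
this patch; reconstructed from its usage in the hunks below (the preload
method takes the bpffs parent dentry, and the owner module is pinned with
try_module_get()), it has this shape:

struct bpf_preload_ops {
	int (*preload)(struct dentry *parent);
	struct module *owner;
};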

A subsequent patch will make it possible to specify the list of eBPF
programs to preload, either in the kernel configuration or with the new
bpf_preload_list= option on the kernel command line.
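
For illustration only (the exact syntax and the accepted names are defined
by that later patch; foo_bpf is an invented example), such a command line
option might look like:

	bpf_preload_list=iterators_bpf,foo_bpf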

For now, bpf_preload is always preloaded, as it still relies on the old
registration method, which consists of setting the bpf_preload_ops global
variable. That will change when bpf_preload switches to the new
registration method based on the linked list.
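
This patch does not yet add the list-based registration API. Purely as a
sketch of where the series is heading (the function name and export are
invented here, not part of this patch), a module could fill in the ops
pointer of the preload_list entry matching its object name, much like
bpf_preload sets the bpf_preload_ops global today:

void bpf_preload_set_ops(const char *obj_name, struct bpf_preload_ops *ops)
{
	struct bpf_preload_ops_item *cur;

	mutex_lock(&bpf_preload_lock);
	list_for_each_entry(cur, &preload_list, list) {
		if (!strcmp(cur->obj_name, obj_name)) {
			/* Publish (or clear, if ops is NULL) the module's ops. */
			cur->ops = ops;
			break;
		}
	}
	mutex_unlock(&bpf_preload_lock);
}
EXPORT_SYMBOL_GPL(bpf_preload_set_ops);

Taking bpf_preload_lock here would also explain why, in the hunk below,
bpf_preload_mod_get() drops the lock around request_module(): the loaded
module's init function needs to acquire the lock to register itself.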

Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
---
 kernel/bpf/inode.c | 89 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 73 insertions(+), 16 deletions(-)

Patch

diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index bb8762abbf3d..0a6e83d32360 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -685,35 +685,91 @@  static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
 struct bpf_preload_ops *bpf_preload_ops;
 EXPORT_SYMBOL_GPL(bpf_preload_ops);
 
-static bool bpf_preload_mod_get(void)
+struct bpf_preload_ops_item {
+	struct list_head list;
+	struct bpf_preload_ops *ops;
+	char *obj_name;
+};
+
+static LIST_HEAD(preload_list);
+static DEFINE_MUTEX(bpf_preload_lock);
+
+static bool bpf_preload_mod_get(const char *obj_name,
+				struct bpf_preload_ops **ops)
 {
-	/* If bpf_preload.ko wasn't loaded earlier then load it now.
-	 * When bpf_preload is built into vmlinux the module's __init
+	/* If the kernel preload module wasn't loaded earlier then load it now.
+	 * When the preload code is built into vmlinux the module's __init
 	 * function will populate it.
 	 */
-	if (!bpf_preload_ops) {
-		request_module("bpf_preload");
-		if (!bpf_preload_ops)
+	if (!*ops) {
+		mutex_unlock(&bpf_preload_lock);
+		request_module(obj_name);
+		mutex_lock(&bpf_preload_lock);
+		if (!*ops)
 			return false;
 	}
 	/* And grab the reference, so the module doesn't disappear while the
 	 * kernel is interacting with the kernel module and its UMD.
 	 */
-	if (!try_module_get(bpf_preload_ops->owner)) {
+	if (!try_module_get((*ops)->owner)) {
 		pr_err("bpf_preload module get failed.\n");
 		return false;
 	}
 	return true;
 }
 
-static void bpf_preload_mod_put(void)
+static void bpf_preload_mod_put(struct bpf_preload_ops *ops)
 {
-	if (bpf_preload_ops)
-		/* now user can "rmmod bpf_preload" if necessary */
-		module_put(bpf_preload_ops->owner);
+	if (ops)
+		/* now user can "rmmod <kernel module>" if necessary */
+		module_put(ops->owner);
 }
 
-static DEFINE_MUTEX(bpf_preload_lock);
+static bool bpf_preload_list_mod_get(void)
+{
+	struct bpf_preload_ops_item *cur;
+	bool ret = false;
+
+	ret |= bpf_preload_mod_get("bpf_preload", &bpf_preload_ops);
+
+	list_for_each_entry(cur, &preload_list, list)
+		ret |= bpf_preload_mod_get(cur->obj_name, &cur->ops);
+
+	return ret;
+}
+
+static int bpf_preload_list(struct dentry *parent)
+{
+	struct bpf_preload_ops_item *cur;
+	int err;
+
+	if (bpf_preload_ops) {
+		err = bpf_preload_ops->preload(parent);
+		if (err)
+			return err;
+	}
+
+	list_for_each_entry(cur, &preload_list, list) {
+		if (!cur->ops)
+			continue;
+
+		err = cur->ops->preload(parent);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void bpf_preload_list_mod_put(void)
+{
+	struct bpf_preload_ops_item *cur;
+
+	list_for_each_entry(cur, &preload_list, list)
+		bpf_preload_mod_put(cur->ops);
+
+	bpf_preload_mod_put(bpf_preload_ops);
+}
 
 static int populate_bpffs(struct dentry *parent)
 {
@@ -724,12 +780,13 @@  static int populate_bpffs(struct dentry *parent)
 	 */
 	mutex_lock(&bpf_preload_lock);
 
-	/* if bpf_preload.ko wasn't built into vmlinux then load it */
-	if (!bpf_preload_mod_get())
+	/* if kernel preload mods weren't built into vmlinux then load them */
+	if (!bpf_preload_list_mod_get())
 		goto out;
 
-	err = bpf_preload_ops->preload(parent);
-	bpf_preload_mod_put();
+	err = bpf_preload_list(parent);
+	bpf_preload_list_mod_put();
+
 out:
 	mutex_unlock(&bpf_preload_lock);
 	return err;
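
To show how the pieces would fit together for a module author, here is a
hypothetical light-skeleton module (all names invented; it assumes a
registration helper like the bpf_preload_set_ops() sketched in the commit
message above, which this patch does not yet provide):

#include <linux/module.h>
#include <linux/dcache.h>

static int foo_bpf_preload(struct dentry *parent)
{
	/* Pin the preloaded program's links/maps under the bpffs parent. */
	return 0;
}

static struct bpf_preload_ops foo_bpf_ops = {
	.preload = foo_bpf_preload,
	.owner   = THIS_MODULE,
};

static int __init foo_bpf_init(void)
{
	/* Attach to the preload_list entry whose obj_name is "foo_bpf". */
	bpf_preload_set_ops("foo_bpf", &foo_bpf_ops);
	return 0;
}

static void __exit foo_bpf_exit(void)
{
	bpf_preload_set_ops("foo_bpf", NULL);
}

module_init(foo_bpf_init);
module_exit(foo_bpf_exit);
MODULE_LICENSE("GPL");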