
[RFC,bpf-next,3/6] bpf: Pass bitwise flags to bpf_mem_alloc_init()

Message ID 20221230041151.1231169-4-houtao@huaweicloud.com (mailing list archive)
State New, archived
Series bpf: Handle reuse in bpf memory alloc

Commit Message

Hou Tao Dec. 30, 2022, 4:11 a.m. UTC
From: Hou Tao <houtao1@huawei.com>

Replace the boolean percpu argument of bpf_mem_alloc_init() with a
bitwise flags argument, so that more flags can be added later. The only
flag defined for now is BPF_MA_PERCPU, which replaces percpu == true.
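
For illustration, a non-percpu and a percpu call site convert as
follows (ma and unit_size stand in for a caller's own variables):

	/* before: per-cpu storage was requested with a bool */
	err = bpf_mem_alloc_init(&ma, unit_size, false, NULL);

	/* after: pass 0 or a bitwise OR of BPF_MA_* flags */
	err = bpf_mem_alloc_init(&ma, unit_size, 0, NULL);
	err = bpf_mem_alloc_init(&ma, unit_size, BPF_MA_PERCPU, NULL);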

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 include/linux/bpf_mem_alloc.h | 8 +++++++-
 kernel/bpf/core.c             | 2 +-
 kernel/bpf/hashtab.c          | 5 +++--
 kernel/bpf/memalloc.c         | 4 +++-
 4 files changed, 14 insertions(+), 5 deletions(-)

Patch

diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index 3c287db087e7..b9f6b9155fa5 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -13,9 +13,15 @@  struct bpf_mem_alloc {
 	struct bpf_mem_cache __percpu *cache;
 	struct work_struct work;
 	void (*ctor)(struct bpf_mem_alloc *ma, void *obj);
+	unsigned int flags;
 };
 
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
+/* flags for bpf_mem_alloc_init() */
+enum {
+	BPF_MA_PERCPU = 1,
+};
+
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags,
 		       void (*ctor)(struct bpf_mem_alloc *, void *));
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
 
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6da2f9a6b085..ca9a698c3f08 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2755,7 +2755,7 @@  static int __init bpf_global_ma_init(void)
 {
 	int ret;
 
-	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false, NULL);
+	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, 0, NULL);
 	bpf_global_ma_set = !ret;
 	return ret;
 }
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3d6557ec4b92..623111d4276d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -574,13 +574,14 @@  static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 				goto free_prealloc;
 		}
 	} else {
-		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false,
+		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, 0,
 					 htab_elem_ctor);
 		if (err)
 			goto free_map_locked;
 		if (percpu) {
 			err = bpf_mem_alloc_init(&htab->pcpu_ma,
-						 round_up(htab->map.value_size, 8), true, NULL);
+						 round_up(htab->map.value_size, 8),
+						 BPF_MA_PERCPU, NULL);
 			if (err)
 				goto free_map_locked;
 		}
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 3ad2e25946b5..454c86596111 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -383,7 +383,7 @@  static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
  * kmalloc/kfree. Max allocation size is 4096 in this case.
  * This is bpf_dynptr and bpf_kptr use case.
  */
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags,
 		       void (*ctor)(struct bpf_mem_alloc *, void *))
 {
 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
@@ -391,7 +391,9 @@  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
 	struct bpf_mem_cache *c, __percpu *pc;
 	struct obj_cgroup *objcg = NULL;
 	int cpu, i, unit_size, percpu_size = 0;
+	bool percpu = (flags & BPF_MA_PERCPU);
 
+	ma->flags = flags;
 	ma->ctor = ctor;
 	if (size) {
 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
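
Since the flags are now saved in struct bpf_mem_alloc, later alloc/free
paths can presumably check BPF_MA_PERCPU without threading a separate
bool through every helper. A minimal sketch of such a check; the helper
below is hypothetical and not part of this patch:

	/* hypothetical helper, for illustration only */
	static bool bpf_ma_is_percpu(const struct bpf_mem_alloc *ma)
	{
		return ma->flags & BPF_MA_PERCPU;
	}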