
[bpf-next] bpf: Only allocate one bpf_mem_cache for bpf_cpumask_ma

Message ID 20230216024134.2094999-1-houtao@huaweicloud.com (mailing list archive)
State Superseded
Delegated to: BPF
Series [bpf-next] bpf: Only allocate one bpf_mem_cache for bpf_cpumask_ma

Checks

Context Check Description
bpf/vmtest-bpf-next-PR pending PR summary
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 77 this patch: 59
netdev/cc_maintainers success CCed 12 of 12 maintainers
netdev/build_clang fail Errors and warnings before: 1 this patch: 8
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 77 this patch: 59
netdev/checkpatch warning WARNING: line length of 84 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Hou Tao Feb. 16, 2023, 2:41 a.m. UTC
From: Hou Tao <houtao1@huawei.com>

The size of bpf_cpumask is fixed, so there is no need to allocate many
bpf_mem_caches for bpf_cpumask_ma; a single bpf_mem_cache is enough.
Also add a comment for bpf_mem_alloc_init() in bpf_mem_alloc.h to prevent
future misuse.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 include/linux/bpf_mem_alloc.h | 7 +++++++
 kernel/bpf/cpumask.c          | 6 +++---
 2 files changed, 10 insertions(+), 3 deletions(-)
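
For readers less familiar with the allocator API, the fixed-size pattern the
patch switches to looks roughly like the sketch below. The foo_* names are
illustrative only; just the bpf_mem_alloc_init() and bpf_mem_cache_{alloc,free}()
calls follow the declarations in include/linux/bpf_mem_alloc.h. Note that
bpf_cpumask_release() additionally keeps the free inside
migrate_disable()/migrate_enable(), which the sketch omits.

/* Hypothetical user of a fixed-size bpf_mem_alloc; only the
 * bpf_mem_* calls mirror the real API.
 */
#include <linux/bpf_mem_alloc.h>

struct foo {
	int val;
};

static struct bpf_mem_alloc foo_ma;

static int foo_ma_init(void)
{
	/* size != 0: one bpf_mem_cache sized for struct foo */
	return bpf_mem_alloc_init(&foo_ma, sizeof(struct foo), false);
}

static struct foo *foo_new(void)
{
	/* fixed-size objects come from bpf_mem_cache_alloc() */
	return bpf_mem_cache_alloc(&foo_ma);
}

static void foo_del(struct foo *f)
{
	/* ...and go back via bpf_mem_cache_free() */
	bpf_mem_cache_free(&foo_ma, f);
}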

Comments

Hou Tao Feb. 16, 2023, 2:16 a.m. UTC | #1
Sorry, please disregard this patch. I forgot to fix the typo in it. Will resend.

On 2/16/2023 10:41 AM, Hou Tao wrote:
> From: Hou Tao <houtao1@huawei.com>
>
> The size of bpf_cpumask is fixed, so there is no need to allocate many
> bpf_mem_caches for bpf_cpumask_ma; a single bpf_mem_cache is enough.
> Also add a comment for bpf_mem_alloc_init() in bpf_mem_alloc.h to prevent
> future misuse.
>
> Signed-off-by: Hou Tao <houtao1@huawei.com>
> ---
>  include/linux/bpf_mem_alloc.h | 7 +++++++
>  kernel/bpf/cpumask.c          | 6 +++---
>  2 files changed, 10 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
> index 3e164b8efaa9..a7104af61ab4 100644
> --- a/include/linux/bpf_mem_alloc.h
> +++ b/include/linux/bpf_mem_alloc.h
> @@ -14,6 +14,13 @@ struct bpf_mem_alloc {
>  	struct work_struct work;
>  };
>  
> +/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
> + * Alloc and free are done with bpf_mem_cache_{alloc,free}().
> + *
> + * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
> + * Alloc and free are done with bpf_mem_{alloc,free}() and the size of
> + * the returned object is given by the size argument of bpf_mem_alloc().
> + */
>  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
>  void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
>  
> diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
> index 52b981512a35..711434b556fb 100644
> --- a/kernel/bpf/cpumask.c
> +++ b/kernel/bpf/cpumask.c
> @@ -55,7 +55,7 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
>  	/* cpumask must be the first element so struct bpf_cpumask be cast to struct cpumask. */
>  	BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);
>  
> -	cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
> +	cpumask = bpf_mem_cache_alloc(&bpf_cpumask_ma);
>  	if (!cpumask)
>  		return NULL;
>  
> @@ -123,7 +123,7 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
>  
>  	if (refcount_dec_and_test(&cpumask->usage)) {
>  		migrate_disable();
> -		bpf_mem_free(&bpf_cpumask_ma, cpumask);
> +		bpf_mem_cache_free(&bpf_cpumask_ma, cpumask);
>  		migrate_enable();
>  	}
>  }
> @@ -468,7 +468,7 @@ static int __init cpumask_kfunc_init(void)
>  		},
>  	};
>  
> -	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false);
> +	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, sizeof(struct bpf_cpumask), false);
>  	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
>  	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
>  	return  ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,

Patch

diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index 3e164b8efaa9..a7104af61ab4 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -14,6 +14,13 @@  struct bpf_mem_alloc {
 	struct work_struct work;
 };
 
+/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
+ * Alloc and free are done with bpf_mem_cache_{alloc,free}().
+ *
+ * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
+ * Alloc and free are done with bpf_mem_{alloc,free}() and the size of
+ * the returned object is given by the size argument of bpf_mem_alloc().
+ */
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
 
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 52b981512a35..711434b556fb 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -55,7 +55,7 @@  __bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
 	/* cpumask must be the first element so struct bpf_cpumask be cast to struct cpumask. */
 	BUILD_BUG_ON(offsetof(struct bpf_cpumask, cpumask) != 0);
 
-	cpumask = bpf_mem_alloc(&bpf_cpumask_ma, sizeof(*cpumask));
+	cpumask = bpf_mem_cache_alloc(&bpf_cpumask_ma);
 	if (!cpumask)
 		return NULL;
 
@@ -123,7 +123,7 @@  __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 
 	if (refcount_dec_and_test(&cpumask->usage)) {
 		migrate_disable();
-		bpf_mem_free(&bpf_cpumask_ma, cpumask);
+		bpf_mem_cache_free(&bpf_cpumask_ma, cpumask);
 		migrate_enable();
 	}
 }
@@ -468,7 +468,7 @@  static int __init cpumask_kfunc_init(void)
 		},
 	};
 
-	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, 0, false);
+	ret = bpf_mem_alloc_init(&bpf_cpumask_ma, sizeof(struct bpf_cpumask), false);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
 	return  ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
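
The comment added to bpf_mem_alloc.h above also documents the 'size = 0' mode,
which is what bpf_cpumask_ma used before this patch. In that mode the object
size is passed on every allocation. A minimal sketch, again with illustrative
names and only the bpf_mem_* calls taken from the header:

#include <linux/bpf_mem_alloc.h>
#include <linux/types.h>

static struct bpf_mem_alloc any_size_ma;

static int any_size_ma_init(void)
{
	/* size = 0: the allocator can serve objects of different sizes */
	return bpf_mem_alloc_init(&any_size_ma, 0, false);
}

static void *any_size_get(size_t size)
{
	/* the requested size is supplied at allocation time */
	return bpf_mem_alloc(&any_size_ma, size);
}

static void any_size_put(void *obj)
{
	bpf_mem_free(&any_size_ma, obj);
}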