[bpf-next,v3,02/18] bpf: lpm_trie memory usage

Message ID 20230227152032.12359-3-laoar.shao@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf: bpf memory usage

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format fail Series longer than 15 patches (and no cover letter)
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2 this patch: 2
netdev/cc_maintainers warning 2 maintainers not CCed: song@kernel.org martin.lau@linux.dev
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 22 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-17

Commit Message

Yafang Shao Feb. 27, 2023, 3:20 p.m. UTC
trie_mem_usage() is introduced to calculate the lpm_trie memory usage.
Some small memory allocations are ignored, and the intermediate (inner)
nodes are ignored as well.

The result is as follows:

- before
10: lpm_trie  flags 0x1
        key 8B  value 8B  max_entries 65536  memlock 1048576B

- after
10: lpm_trie  flags 0x1
        key 8B  value 8B  max_entries 65536  memlock 2291536B

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 kernel/bpf/lpm_trie.c | 11 +++++++++++
 1 file changed, 11 insertions(+)
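
For reference, the numbers above can be reproduced from the lpm_trie
definitions in kernel/bpf/lpm_trie.c. A worked sketch (the 40-byte node
size assumes a typical x86-64 layout; it is not stated in the patch):

	/*
	 * data_size = key_size - offsetof(struct bpf_lpm_trie_key, data)
	 *           = 8 - 4 = 4
	 * elem_size = sizeof(struct lpm_trie_node) + data_size + value_size
	 *           = 40 + 4 + 8 = 52
	 *
	 * memlock 2291536B / 52B per element = 44068 entries were live in
	 * the trie when it was dumped.  The old 1048576B figure came from
	 * the generic estimate round_up(max_entries *
	 * round_up(key_size + value_size, 8), PAGE_SIZE) = 65536 * 16,
	 * which never tracked the trie's real usage.
	 */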

Comments

Hou Tao March 1, 2023, 2:59 a.m. UTC | #1
From: Hou Tao <houtao1@huawei.com>

Hi,

> trie_mem_usage() is introduced to calculate the lpm_trie memory usage.
> Some small memory allocations are ignored, and the intermediate (inner)
> nodes are ignored as well.
> 
> The result is as follows:
> 
> - before
> 10: lpm_trie  flags 0x1
>         key 8B  value 8B  max_entries 65536  memlock 1048576B
> 
> - after
> 10: lpm_trie  flags 0x1
>         key 8B  value 8B  max_entries 65536  memlock 2291536B
> 
> Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
> ---
>  kernel/bpf/lpm_trie.c | 11 +++++++++++
>  1 file changed, 11 insertions(+)
> 
> diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
> index d833496..e0ca08e 100644
> --- a/kernel/bpf/lpm_trie.c
> +++ b/kernel/bpf/lpm_trie.c
> @@ -720,6 +720,16 @@ static int trie_check_btf(const struct bpf_map *map,
>  	       -EINVAL : 0;
>  }
>  
> +static u64 trie_mem_usage(const struct bpf_map *map)
> +{
> +	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
> +	u64 elem_size;
> +
> +	elem_size = sizeof(struct lpm_trie_node) + trie->data_size +
> +			    trie->map.value_size;
> +	return elem_size * trie->n_entries;
Need to use READ_ONCE(trie->n_entries): all updates of n_entries are protected by trie->lock, but this read is lockless.

> +}
> +
>  BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
>  const struct bpf_map_ops trie_map_ops = {
>  	.map_meta_equal = bpf_map_meta_equal,
> @@ -733,5 +743,6 @@ static int trie_check_btf(const struct bpf_map *map,
>  	.map_update_batch = generic_map_update_batch,
>  	.map_delete_batch = generic_map_delete_batch,
>  	.map_check_btf = trie_check_btf,
> +	.map_mem_usage = trie_mem_usage,
>  	.map_btf_id = &trie_map_btf_ids[0],
>  };
> -- 
> 1.8.3.1
>
Yafang Shao March 3, 2023, 10:38 a.m. UTC | #2
On Wed, Mar 1, 2023 at 10:31 AM Hou Tao <houtao@huaweicloud.com> wrote:
>
> From: Hou Tao <houtao1@huawei.com>
>
> Hi,
>
> > [...]
> >
> > +static u64 trie_mem_usage(const struct bpf_map *map)
> > +{
> > +     struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
> > +     u64 elem_size;
> > +
> > +     elem_size = sizeof(struct lpm_trie_node) + trie->data_size +
> > +                         trie->map.value_size;
> > +     return elem_size * trie->n_entries;
> Need to use READ_ONCE(trie->n_entries): all updates of n_entries are protected by trie->lock, but this read is lockless.

Indeed. Will change it in the next version.
Thanks for your review.
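
Based on the exchange above, the v4 follow-up would presumably look like
this (a sketch of the agreed fix, not the committed patch):

	static u64 trie_mem_usage(const struct bpf_map *map)
	{
		struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
		u64 elem_size;

		elem_size = sizeof(struct lpm_trie_node) + trie->data_size +
			    trie->map.value_size;
		/* Updates of n_entries are serialized by trie->lock, but this
		 * callback runs locklessly, so use READ_ONCE() for the read. */
		return elem_size * READ_ONCE(trie->n_entries);
	}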

Patch

diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index d833496..e0ca08e 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -720,6 +720,16 @@ static int trie_check_btf(const struct bpf_map *map,
 	       -EINVAL : 0;
 }
 
+static u64 trie_mem_usage(const struct bpf_map *map)
+{
+	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+	u64 elem_size;
+
+	elem_size = sizeof(struct lpm_trie_node) + trie->data_size +
+			    trie->map.value_size;
+	return elem_size * trie->n_entries;
+}
+
 BTF_ID_LIST_SINGLE(trie_map_btf_ids, struct, lpm_trie)
 const struct bpf_map_ops trie_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -733,5 +743,6 @@ static int trie_check_btf(const struct bpf_map *map,
 	.map_update_batch = generic_map_update_batch,
 	.map_delete_batch = generic_map_delete_batch,
 	.map_check_btf = trie_check_btf,
+	.map_mem_usage = trie_mem_usage,
 	.map_btf_id = &trie_map_btf_ids[0],
 };
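
The updated accounting is visible wherever memlock is reported, e.g. via
bpftool (which produced the before/after listings in the commit message)
or the map's fdinfo; both examples below assume map id 10 as in the
quoted output, and <map-fd> stands for the map's file descriptor:

	# bpftool map show id 10
	10: lpm_trie  flags 0x1
	        key 8B  value 8B  max_entries 65536  memlock 2291536B

	# grep memlock /proc/<pid>/fdinfo/<map-fd>
	memlock:	2291536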