diff mbox series

[bpf-next,v1,1/2] libbpf: introduce new API libbpf_num_online_cpus

Message ID 20230117044902.98938-1-tong@infragraf.org (mailing list archive)
State Changes Requested
Delegated to: BPF
Headers show
Series [bpf-next,v1,1/2] libbpf: introduce new API libbpf_num_online_cpus | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 12 of 12 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 85 lines checked
netdev/kdoc fail Errors and warnings before: 84 this patch: 86
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain }}
bpf/vmtest-bpf-next-VM_Test-2 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-8 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-9 success Logs for set-matrix

Commit Message

Tonghao Zhang Jan. 17, 2023, 4:49 a.m. UTC
From: Tonghao Zhang <tong@infragraf.org>

Add a new API, libbpf_num_online_cpus(), that helps users fetch
the number of online CPUs.

It's useful on systems where the number of online CPUs differs from
the number of possible CPUs.

Signed-off-by: Tonghao Zhang <tong@infragraf.org>
Cc: Quentin Monnet <quentin@isovalent.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Yonghong Song <yhs@fb.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
---
 tools/lib/bpf/libbpf.c   | 47 ++++++++++++++++++++++++++++++----------
 tools/lib/bpf/libbpf.h   |  7 ++++++
 tools/lib/bpf/libbpf.map |  1 +
 3 files changed, 43 insertions(+), 12 deletions(-)

Comments

Quentin Monnet Jan. 18, 2023, 10:41 a.m. UTC | #1
2023-01-17 12:49 UTC+0800 ~ tong@infragraf.org
> From: Tonghao Zhang <tong@infragraf.org>
> 
> Adding a new API libbpf_num_online_cpus() that helps user with
> fetching online CPUs number.
> 
> It's useful in system which number of online CPUs is different with
> possible CPUs.
> 
> Signed-off-by: Tonghao Zhang <tong@infragraf.org>
> Cc: Quentin Monnet <quentin@isovalent.com>
> Cc: Alexei Starovoitov <ast@kernel.org>
> Cc: Daniel Borkmann <daniel@iogearbox.net>
> Cc: Andrii Nakryiko <andrii@kernel.org>
> Cc: Martin KaFai Lau <martin.lau@linux.dev>
> Cc: Song Liu <song@kernel.org>
> Cc: Yonghong Song <yhs@fb.com>
> Cc: John Fastabend <john.fastabend@gmail.com>
> Cc: KP Singh <kpsingh@kernel.org>
> Cc: Stanislav Fomichev <sdf@google.com>
> Cc: Hao Luo <haoluo@google.com>
> Cc: Jiri Olsa <jolsa@kernel.org>
> ---
>  tools/lib/bpf/libbpf.c   | 47 ++++++++++++++++++++++++++++++----------
>  tools/lib/bpf/libbpf.h   |  7 ++++++
>  tools/lib/bpf/libbpf.map |  1 +
>  3 files changed, 43 insertions(+), 12 deletions(-)
> 
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 27d9faa80471..b84904f79ffd 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -12192,30 +12192,53 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
>  	return parse_cpu_mask_str(buf, mask, mask_sz);
>  }
>  
> -int libbpf_num_possible_cpus(void)
> +static int num_cpus(const char *fcpu)
>  {
> -	static const char *fcpu = "/sys/devices/system/cpu/possible";
> -	static int cpus;
> -	int err, n, i, tmp_cpus;
> +	int err, n, i, cpus;
>  	bool *mask;
>  
> -	tmp_cpus = READ_ONCE(cpus);
> -	if (tmp_cpus > 0)
> -		return tmp_cpus;
> -
>  	err = parse_cpu_mask_file(fcpu, &mask, &n);
>  	if (err)
>  		return libbpf_err(err);
>  
> -	tmp_cpus = 0;
> +	cpus = 0;
>  	for (i = 0; i < n; i++) {
>  		if (mask[i])
> -			tmp_cpus++;
> +			cpus++;
>  	}
>  	free(mask);
>  
> -	WRITE_ONCE(cpus, tmp_cpus);
> -	return tmp_cpus;
> +	return cpus;
> +}
> +
> +int libbpf_num_online_cpus(void)
> +{
> +	static int online_cpus;
> +	int cpus;
> +
> +	cpus = READ_ONCE(online_cpus);
> +	if (cpus > 0)
> +		return cpus;

The number of online CPUs can change over time, I don't think you can
READ_ONCE()/WRITE_ONCE().

> +
> +	cpus = num_cpus("/sys/devices/system/cpu/online");
> +
> +	WRITE_ONCE(online_cpus, cpus);
> +	return cpus;
> +}
> +
> +int libbpf_num_possible_cpus(void)
> +{
> +	static int possible_cpus;
> +	int cpus;
> +
> +	cpus = READ_ONCE(possible_cpus);
> +	if (cpus > 0)
> +		return cpus;
> +
> +	cpus = num_cpus("/sys/devices/system/cpu/possible");
> +
> +	WRITE_ONCE(possible_cpus, cpus);
> +	return cpus;
>  }
>  
>  static int populate_skeleton_maps(const struct bpf_object *obj,
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 898db26e42e9..e433575ff865 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -1332,6 +1332,13 @@ LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
>   */
>  LIBBPF_API int libbpf_num_possible_cpus(void);
>  
> +/**
> + * @brief **libbpf_num_online_cpus()** is a helper function to get the
> + * number of online CPUs that the host kernel supports and expects.
> + * @return number of online CPUs; or error code on failure
> + */
> +LIBBPF_API int libbpf_num_online_cpus(void);
> +
>  struct bpf_map_skeleton {
>  	const char *name;
>  	struct bpf_map **map;
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index 11c36a3c1a9f..384fb6333f3f 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -381,6 +381,7 @@ LIBBPF_1.1.0 {
>  		user_ring_buffer__reserve;
>  		user_ring_buffer__reserve_blocking;
>  		user_ring_buffer__submit;
> +		libbpf_num_online_cpus;
>  } LIBBPF_1.0.0;

Libbpf v1.1.0 has shipped, so this would now go to the 1.2.0 block below.

>  
>  LIBBPF_1.2.0 {

Thanks, but I'm not sure that retrieving the number of online CPUs,
without the list of those in use, is enough to fix the bpftool
profiling feature. Please see my answer on the second patch.

Quentin
diff mbox series

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 27d9faa80471..b84904f79ffd 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -12192,30 +12192,53 @@  int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
 	return parse_cpu_mask_str(buf, mask, mask_sz);
 }
 
-int libbpf_num_possible_cpus(void)
+static int num_cpus(const char *fcpu)
 {
-	static const char *fcpu = "/sys/devices/system/cpu/possible";
-	static int cpus;
-	int err, n, i, tmp_cpus;
+	int err, n, i, cpus;
 	bool *mask;
 
-	tmp_cpus = READ_ONCE(cpus);
-	if (tmp_cpus > 0)
-		return tmp_cpus;
-
 	err = parse_cpu_mask_file(fcpu, &mask, &n);
 	if (err)
 		return libbpf_err(err);
 
-	tmp_cpus = 0;
+	cpus = 0;
 	for (i = 0; i < n; i++) {
 		if (mask[i])
-			tmp_cpus++;
+			cpus++;
 	}
 	free(mask);
 
-	WRITE_ONCE(cpus, tmp_cpus);
-	return tmp_cpus;
+	return cpus;
+}
+
+int libbpf_num_online_cpus(void)
+{
+	static int online_cpus;
+	int cpus;
+
+	cpus = READ_ONCE(online_cpus);
+	if (cpus > 0)
+		return cpus;
+
+	cpus = num_cpus("/sys/devices/system/cpu/online");
+
+	WRITE_ONCE(online_cpus, cpus);
+	return cpus;
+}
+
+int libbpf_num_possible_cpus(void)
+{
+	static int possible_cpus;
+	int cpus;
+
+	cpus = READ_ONCE(possible_cpus);
+	if (cpus > 0)
+		return cpus;
+
+	cpus = num_cpus("/sys/devices/system/cpu/possible");
+
+	WRITE_ONCE(possible_cpus, cpus);
+	return cpus;
 }
 
 static int populate_skeleton_maps(const struct bpf_object *obj,
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 898db26e42e9..e433575ff865 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1332,6 +1332,13 @@  LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
  */
 LIBBPF_API int libbpf_num_possible_cpus(void);
 
+/**
+ * @brief **libbpf_num_online_cpus()** is a helper function to get the
+ * number of online CPUs that the host kernel supports and expects.
+ * @return number of online CPUs; or error code on failure
+ */
+LIBBPF_API int libbpf_num_online_cpus(void);
+
 struct bpf_map_skeleton {
 	const char *name;
 	struct bpf_map **map;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 11c36a3c1a9f..384fb6333f3f 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -381,6 +381,7 @@  LIBBPF_1.1.0 {
 		user_ring_buffer__reserve;
 		user_ring_buffer__reserve_blocking;
 		user_ring_buffer__submit;
+		libbpf_num_online_cpus;
 } LIBBPF_1.0.0;
 
 LIBBPF_1.2.0 {