diff mbox series

[bpf-next,v3] libbpf: Add sample_period to creation options

Message ID 20230206133532.2973474-1-arilou@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series [bpf-next,v3] libbpf: Add sample_period to creation options | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 9 maintainers not CCed: john.fastabend@gmail.com daniel@iogearbox.net sdf@google.com jolsa@kernel.org song@kernel.org martin.lau@linux.dev haoluo@google.com yhs@fb.com kpsingh@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning CHECK: Please use a blank line after function/struct/union/enum declarations
netdev/kdoc success Errors and warnings before: 110 this patch: 110
netdev/source_inline success Was 0 now: 0

Commit Message

Jon Doron Feb. 6, 2023, 1:35 p.m. UTC
From: Jon Doron <jond@wiz.io>

Add an option to set when the perf buffer should wake up; by default the
perf buffer becomes signaled for every event that is pushed to it.

In case of a high throughput of events, it will be more efficient to wake
up only once you have X events ready to be read.

So your application can wake up once and drain the entire perf buffer.

Signed-off-by: Jon Doron <jond@wiz.io>
---
 tools/lib/bpf/libbpf.c | 9 +++++++--
 tools/lib/bpf/libbpf.h | 3 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)

Comments

Yonghong Song Feb. 7, 2023, 7:15 a.m. UTC | #1
On 2/6/23 5:35 AM, Jon Doron wrote:
> From: Jon Doron <jond@wiz.io>
> 
> Add option to set when the perf buffer should wake up, by default the
> perf buffer becomes signaled for every event that is being pushed to it.
> 
> In case of a high throughput of events it will be more efficient to wake
> up only once you have X events ready to be read.
> 
> So your application can wakeup once and drain the entire perf buffer.
> 
> Signed-off-by: Jon Doron <jond@wiz.io>

LGTM  with one possible change below.

Acked-by: Yonghong Song <yhs@fb.com>

> ---
>   tools/lib/bpf/libbpf.c | 9 +++++++--
>   tools/lib/bpf/libbpf.h | 3 ++-
>   2 files changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index eed5cec6f510..cd0bce5482b2 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -11710,17 +11710,22 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
>   	const size_t attr_sz = sizeof(struct perf_event_attr);
>   	struct perf_buffer_params p = {};
>   	struct perf_event_attr attr;
> +	__u32 sample_period;
>   
>   	if (!OPTS_VALID(opts, perf_buffer_opts))
>   		return libbpf_err_ptr(-EINVAL);
>   
> +	sample_period = OPTS_GET(opts, sample_period, 1);
> +	if (!sample_period)
> +		sample_period = 1;
> +
>   	memset(&attr, 0, attr_sz);
>   	attr.size = attr_sz;
>   	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
>   	attr.type = PERF_TYPE_SOFTWARE;
>   	attr.sample_type = PERF_SAMPLE_RAW;
> -	attr.sample_period = 1;
> -	attr.wakeup_events = 1;
> +	attr.sample_period = sample_period;
> +	attr.wakeup_events = sample_period;
>   
>   	p.attr = &attr;
>   	p.sample_cb = sample_cb;
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 8777ff21ea1d..5d3b75a5acde 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -1246,8 +1246,9 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
>   /* common use perf buffer options */
>   struct perf_buffer_opts {
>   	size_t sz;
> +	__u32 sample_period;
>   };

The data structure now may be 16 bytes for 64bit system and we have
4 byte padding at the end which could be arbitrary value. The libbpf
convention is to add "size_t :0;" at the end of structure to zero
out tail padding during declaration.

> -#define perf_buffer_opts__last_field sz
> +#define perf_buffer_opts__last_field sample_period
>   
>   /**
>    * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
Jon Doron Feb. 7, 2023, 8:19 a.m. UTC | #2
On 06/02/2023, Yonghong Song wrote:
>
>
>On 2/6/23 5:35 AM, Jon Doron wrote:
>>From: Jon Doron <jond@wiz.io>
>>
>>Add option to set when the perf buffer should wake up, by default the
>>perf buffer becomes signaled for every event that is being pushed to it.
>>
>>In case of a high throughput of events it will be more efficient to wake
>>up only once you have X events ready to be read.
>>
>>So your application can wakeup once and drain the entire perf buffer.
>>
>>Signed-off-by: Jon Doron <jond@wiz.io>
>
>LGTM  with one possible change below.
>
>Acked-by: Yonghong Song <yhs@fb.com>
>
>>---
>>  tools/lib/bpf/libbpf.c | 9 +++++++--
>>  tools/lib/bpf/libbpf.h | 3 ++-
>>  2 files changed, 9 insertions(+), 3 deletions(-)
>>
>>diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
>>index eed5cec6f510..cd0bce5482b2 100644
>>--- a/tools/lib/bpf/libbpf.c
>>+++ b/tools/lib/bpf/libbpf.c
>>@@ -11710,17 +11710,22 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
>>  	const size_t attr_sz = sizeof(struct perf_event_attr);
>>  	struct perf_buffer_params p = {};
>>  	struct perf_event_attr attr;
>>+	__u32 sample_period;
>>  	if (!OPTS_VALID(opts, perf_buffer_opts))
>>  		return libbpf_err_ptr(-EINVAL);
>>+	sample_period = OPTS_GET(opts, sample_period, 1);
>>+	if (!sample_period)
>>+		sample_period = 1;
>>+
>>  	memset(&attr, 0, attr_sz);
>>  	attr.size = attr_sz;
>>  	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
>>  	attr.type = PERF_TYPE_SOFTWARE;
>>  	attr.sample_type = PERF_SAMPLE_RAW;
>>-	attr.sample_period = 1;
>>-	attr.wakeup_events = 1;
>>+	attr.sample_period = sample_period;
>>+	attr.wakeup_events = sample_period;
>>  	p.attr = &attr;
>>  	p.sample_cb = sample_cb;
>>diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
>>index 8777ff21ea1d..5d3b75a5acde 100644
>>--- a/tools/lib/bpf/libbpf.h
>>+++ b/tools/lib/bpf/libbpf.h
>>@@ -1246,8 +1246,9 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
>>  /* common use perf buffer options */
>>  struct perf_buffer_opts {
>>  	size_t sz;
>>+	__u32 sample_period;
>>  };
>
>The data structure now may be 16 bytes for 64bit system and we have
>4 byte padding at the end which could be arbitrary value. The libbpf
>convention is to add "size_t :0;" at the end of structure to zero
>out tail padding during declaration.
>

Done

>>-#define perf_buffer_opts__last_field sz
>>+#define perf_buffer_opts__last_field sample_period
>>  /**
>>   * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
diff mbox series

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index eed5cec6f510..cd0bce5482b2 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -11710,17 +11710,22 @@  struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
 	const size_t attr_sz = sizeof(struct perf_event_attr);
 	struct perf_buffer_params p = {};
 	struct perf_event_attr attr;
+	__u32 sample_period;
 
 	if (!OPTS_VALID(opts, perf_buffer_opts))
 		return libbpf_err_ptr(-EINVAL);
 
+	sample_period = OPTS_GET(opts, sample_period, 1);
+	if (!sample_period)
+		sample_period = 1;
+
 	memset(&attr, 0, attr_sz);
 	attr.size = attr_sz;
 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
 	attr.type = PERF_TYPE_SOFTWARE;
 	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
+	attr.sample_period = sample_period;
+	attr.wakeup_events = sample_period;
 
 	p.attr = &attr;
 	p.sample_cb = sample_cb;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 8777ff21ea1d..5d3b75a5acde 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1246,8 +1246,9 @@  typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
 /* common use perf buffer options */
 struct perf_buffer_opts {
 	size_t sz;
+	__u32 sample_period;
 };
-#define perf_buffer_opts__last_field sz
+#define perf_buffer_opts__last_field sample_period
 
 /**
  * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified