
[bpf-next,07/14] libbpf: add ring__avail_data_size

Message ID 20230914231123.193901-8-martin.kelly@crowdstrike.com (mailing list archive)
State Superseded
Delegated to: BPF
Series: add libbpf getters for individual ringbuffers

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 9 this patch: 9
netdev/cc_maintainers warning 8 maintainers not CCed: martin.lau@linux.dev jolsa@kernel.org haoluo@google.com sdf@google.com john.fastabend@gmail.com yonghong.song@linux.dev kpsingh@kernel.org song@kernel.org
netdev/build_clang success Errors and warnings before: 9 this patch: 9
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 9 this patch: 9
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 35 lines checked
netdev/kdoc fail Errors and warnings before: 124 this patch: 126
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-0 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-1 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-7 success Logs for test_maps on s390x with gcc

Commit Message

Martin Kelly Sept. 14, 2023, 11:11 p.m. UTC
Add ring__avail_data_size for querying the currently available data in
the ringbuffer, similar to the BPF_RB_AVAIL_DATA flag in
bpf_ringbuf_query. This is racy during ongoing operations but is still
useful for overall information on how a ringbuffer is behaving.

Signed-off-by: Martin Kelly <martin.kelly@crowdstrike.com>
---
 tools/lib/bpf/libbpf.h   | 11 +++++++++++
 tools/lib/bpf/libbpf.map |  1 +
 tools/lib/bpf/ringbuf.c  |  5 +++++
 3 files changed, 17 insertions(+)
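
For context, a minimal usage sketch of the new getter together with
ring_buffer__ring() (added earlier in this series) could look like the
snippet below. report_backlog() and the single-ring assumption are
illustrative only, not part of the patch; it assumes rb was created
earlier with ring_buffer__new():

#include <stdio.h>
#include <bpf/libbpf.h>

/* Print how many bytes the consumer is lagging behind the producer for
 * ring 0 of an existing ring_buffer manager. The value is racy, so
 * treat it as a trend indicator rather than an exact count.
 */
static void report_backlog(struct ring_buffer *rb)
{
	struct ring *r = ring_buffer__ring(rb, 0);

	if (!r)
		return;
	printf("unconsumed bytes: %zu\n", ring__avail_data_size(r));
}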

Comments

Andrii Nakryiko Sept. 21, 2023, 1:28 a.m. UTC | #1
On Thu, Sep 14, 2023 at 4:12 PM Martin Kelly
<martin.kelly@crowdstrike.com> wrote:
>
> Add ring__avail_data_size for querying the currently available data in
> the ringbuffer, similar to the BPF_RB_AVAIL_DATA flag in
> bpf_ringbuf_query. This is racy during ongoing operations but is still
> useful for overall information on how a ringbuffer is behaving.
>
> Signed-off-by: Martin Kelly <martin.kelly@crowdstrike.com>
> ---
>  tools/lib/bpf/libbpf.h   | 11 +++++++++++
>  tools/lib/bpf/libbpf.map |  1 +
>  tools/lib/bpf/ringbuf.c  |  5 +++++
>  3 files changed, 17 insertions(+)
>
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 935162dbb3bf..87e3bad37737 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -1279,6 +1279,17 @@ LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);
>   */
>  LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);
>
> +/**
> + * @brief **ring__avail_data_size()** returns the number of bytes in this
> + * ringbuffer not yet consumed. This has no locking associated with it, so it
> + * can be inaccurate if operations are ongoing while this is called. However, it
> + * should still show the correct trend over the long-term.
> + *
> + * @param r A ring object.
> + * @return The number of bytes not yet consumed.
> + */
> +LIBBPF_API size_t ring__avail_data_size(const struct ring *r);
> +
>  struct user_ring_buffer_opts {
>         size_t sz; /* size of this struct, for forward/backward compatibility */
>  };
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index 1c532fe7a445..f66d7f0bc224 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -401,6 +401,7 @@ LIBBPF_1.3.0 {
>                 bpf_program__attach_tcx;
>                 bpf_program__attach_uprobe_multi;
>                 ring_buffer__ring;
> +               ring__avail_data_size;
>                 ring__consumer_pos;
>                 ring__producer_pos;
>  } LIBBPF_1.2.0;
> diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
> index 54c596db57a4..f51ad1af6ab8 100644
> --- a/tools/lib/bpf/ringbuf.c
> +++ b/tools/lib/bpf/ringbuf.c
> @@ -350,6 +350,11 @@ unsigned long ring__producer_pos(const struct ring *r)
>         return smp_load_acquire(r->producer_pos);
>  }
>
> +size_t ring__avail_data_size(const struct ring *r)
> +{
> +       return ring__producer_pos(r) - ring__consumer_pos(r);

this might be ok as is, but if you look at the kernel implementation, we
make sure to get the consumer position first and the producer position
second, and only then calculate the difference. This is deliberate, to
avoid the situation where the consumer pos is greater than the producer
pos, which would result in nonsensical negative (or huge) numbers.

Let's do the same: use two local variables and read them in the
conservative order: consumer first, then producer.


> +}
> +
>  static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
>  {
>         if (rb->consumer_pos) {
> --
> 2.34.1
>
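
For illustration, the reordered version being asked for might look
roughly like the sketch below, mirroring the kernel's conservative
ordering; this is not the posted patch:

size_t ring__avail_data_size(const struct ring *r)
{
	unsigned long cons_pos, prod_pos;

	/* Read the consumer pos before the producer pos, as the kernel
	 * does: both counters only grow, so this ordering guarantees
	 * prod_pos >= cons_pos and the (racy) difference can never come
	 * out negative/huge.
	 */
	cons_pos = ring__consumer_pos(r);
	prod_pos = ring__producer_pos(r);
	return prod_pos - cons_pos;
}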

Patch

diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 935162dbb3bf..87e3bad37737 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -1279,6 +1279,17 @@  LIBBPF_API unsigned long ring__consumer_pos(const struct ring *r);
  */
 LIBBPF_API unsigned long ring__producer_pos(const struct ring *r);
 
+/**
+ * @brief **ring__avail_data_size()** returns the number of bytes in this
+ * ringbuffer not yet consumed. This has no locking associated with it, so it
+ * can be inaccurate if operations are ongoing while this is called. However, it
+ * should still show the correct trend over the long-term.
+ *
+ * @param r A ring object.
+ * @return The number of bytes not yet consumed.
+ */
+LIBBPF_API size_t ring__avail_data_size(const struct ring *r);
+
 struct user_ring_buffer_opts {
 	size_t sz; /* size of this struct, for forward/backward compatibility */
 };
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 1c532fe7a445..f66d7f0bc224 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -401,6 +401,7 @@  LIBBPF_1.3.0 {
 		bpf_program__attach_tcx;
 		bpf_program__attach_uprobe_multi;
 		ring_buffer__ring;
+		ring__avail_data_size;
 		ring__consumer_pos;
 		ring__producer_pos;
 } LIBBPF_1.2.0;
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 54c596db57a4..f51ad1af6ab8 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -350,6 +350,11 @@  unsigned long ring__producer_pos(const struct ring *r)
 	return smp_load_acquire(r->producer_pos);
 }
 
+size_t ring__avail_data_size(const struct ring *r)
+{
+	return ring__producer_pos(r) - ring__consumer_pos(r);
+}
+
 static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
 {
 	if (rb->consumer_pos) {