
[bpf-next,6/8] libbpf, xsk: select bpf_redirect_xsk(), if supported

Message ID 20210119153655.153999-7-bjorn.topel@gmail.com
State Superseded
Delegated to: BPF
Series Introduce bpf_redirect_xsk() helper

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 5 maintainers not CCed: songliubraving@fb.com andrii@kernel.org kpsingh@kernel.org kafai@fb.com yhs@fb.com
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success
netdev/stable success Stable not CCed

Commit Message

Björn Töpel Jan. 19, 2021, 3:36 p.m. UTC
From: Björn Töpel <bjorn.topel@intel.com>

Select bpf_redirect_xsk() as the default AF_XDP BPF program, if
supported.

The bpf_redirect_xsk() helper does not require an XSKMAP, so make sure
that no map is created/updated when using it.
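
For reference, on >= 5.3 kernels the current default program corresponds
to the C below and needs an XSKMAP that libbpf creates and populates per
queue (the map declaration is sketched here for illustration only; the
sizing is an example, not the exact libbpf-generated layout):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_XSKMAP);
          __uint(key_size, sizeof(int));
          __uint(value_size, sizeof(int));
          __uint(max_entries, 64); /* sized from the number of queues */
  } xsks_map SEC(".maps");

  SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
  {
          return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
  }

With bpf_redirect_xsk() the map goes away entirely: per this series the
kernel looks up the socket bound to the receiving netdev/queue, and the
helper's argument (here XDP_PASS) is the action used when no socket is
bound:

  SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
  {
          return bpf_redirect_xsk(ctx, XDP_PASS);
  }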

Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
---
 tools/lib/bpf/xsk.c | 46 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

Patch

diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index c8642c6cb5d6..27e36d6d92a6 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -47,9 +47,12 @@ 
  #define PF_XDP AF_XDP
 #endif
 
+#define XSKMAP_NOT_NEEDED -1
+
 enum xsk_prog {
 	XSK_PROG_FALLBACK,
 	XSK_PROG_REDIRECT_FLAGS,
+	XSK_PROG_REDIRECT_XSK,
 };
 
 struct xsk_umem {
@@ -361,7 +364,11 @@  static enum xsk_prog get_xsk_prog(void)
 {
 	__u32 kver = get_kernel_version();
 
-	return kver < KERNEL_VERSION(5, 3, 0) ? XSK_PROG_FALLBACK : XSK_PROG_REDIRECT_FLAGS;
+	if (kver < KERNEL_VERSION(5, 3, 0))
+		return XSK_PROG_FALLBACK;
+	if (kver < KERNEL_VERSION(5, 12, 0))
+		return XSK_PROG_REDIRECT_FLAGS;
+	return XSK_PROG_REDIRECT_XSK;
 }
 
 static int xsk_load_xdp_prog(struct xsk_socket *xsk)
@@ -445,10 +452,25 @@  static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
 		BPF_EXIT_INSN(),
 	};
+
+	/* This is the post-5.12 kernel C-program:
+	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+	 * {
+	 *     return bpf_redirect_xsk(ctx, XDP_PASS);
+	 * }
+	 */
+	struct bpf_insn prog_redirect_xsk[] = {
+		/* r2 = XDP_PASS */
+		BPF_MOV64_IMM(BPF_REG_2, 2),
+		/* call bpf_redirect_xsk */
+		BPF_EMIT_CALL(BPF_FUNC_redirect_xsk),
+		BPF_EXIT_INSN(),
+	};
 	size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn),
 			      sizeof(prog_redirect_flags) / sizeof(struct bpf_insn),
+			      sizeof(prog_redirect_xsk) / sizeof(struct bpf_insn),
 	};
-	struct bpf_insn *progs[] = {prog, prog_redirect_flags};
+	struct bpf_insn *progs[] = {prog, prog_redirect_flags, prog_redirect_xsk};
 	enum xsk_prog option = get_xsk_prog();
 
 	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
@@ -508,12 +530,22 @@  static int xsk_get_max_queues(struct xsk_socket *xsk)
 	return ret;
 }
 
+static bool xskmap_required(void)
+{
+	return get_xsk_prog() != XSK_PROG_REDIRECT_XSK;
+}
+
 static int xsk_create_bpf_maps(struct xsk_socket *xsk)
 {
 	struct xsk_ctx *ctx = xsk->ctx;
 	int max_queues;
 	int fd;
 
+	if (!xskmap_required()) {
+		ctx->xsks_map_fd = XSKMAP_NOT_NEEDED;
+		return 0;
+	}
+
 	max_queues = xsk_get_max_queues(xsk);
 	if (max_queues < 0)
 		return max_queues;
@@ -532,6 +564,9 @@  static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
 {
 	struct xsk_ctx *ctx = xsk->ctx;
 
+	if (ctx->xsks_map_fd == XSKMAP_NOT_NEEDED)
+		return;
+
 	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
 	close(ctx->xsks_map_fd);
 }
@@ -563,7 +598,7 @@  static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
 	if (err)
 		goto out_map_ids;
 
-	ctx->xsks_map_fd = -1;
+	ctx->xsks_map_fd = XSKMAP_NOT_NEEDED;
 
 	for (i = 0; i < prog_info.nr_map_ids; i++) {
 		fd = bpf_map_get_fd_by_id(map_ids[i]);
@@ -585,7 +620,7 @@  static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
 	}
 
 	err = 0;
-	if (ctx->xsks_map_fd == -1)
+	if (ctx->xsks_map_fd == XSKMAP_NOT_NEEDED && xskmap_required())
 		err = -ENOENT;
 
 out_map_ids:
@@ -597,6 +632,9 @@  static int xsk_set_bpf_maps(struct xsk_socket *xsk)
 {
 	struct xsk_ctx *ctx = xsk->ctx;
 
+	if (ctx->xsks_map_fd == XSKMAP_NOT_NEEDED)
+		return 0;
+
 	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
 				   &xsk->fd, 0);
 }
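
As a usage note, applications do not need any change to pick this up: a
minimal sketch (not part of this patch; interface name, queue id and
frame count are placeholders, umem fill and error handling omitted) of a
caller that leaves program loading to libbpf, i.e. does not set
XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD, and therefore gets whichever program
get_xsk_prog() selects:

#include <bpf/xsk.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define NUM_FRAMES 4096

int main(void)
{
	struct xsk_socket_config cfg = {};
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_socket *xsk;
	struct xsk_umem *umem;
	void *bufs;
	int err;

	if (posix_memalign(&bufs, getpagesize(),
			   NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE))
		return 1;

	/* NULL config: use libbpf's default umem configuration. */
	err = xsk_umem__create(&umem, bufs,
			       NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
			       &fill, &comp, NULL);
	if (err)
		return 1;

	cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
	cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
	cfg.libbpf_flags = 0;	/* let libbpf load its default XDP program */

	/* "eth0"/queue 0 are placeholders; program selection happens
	 * inside libbpf, transparently to the caller.
	 */
	err = xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, &cfg);
	if (err)
		fprintf(stderr, "xsk_socket__create: %s\n", strerror(-err));

	return err ? 1 : 0;
}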