
[RFC,bpf-next,5/5] bpf: x86: add BPF_STATIC_BRANCH_UPDATE syscall

Message ID 20240122164936.810117-6-aspsk@isovalent.com (mailing list archive)
State RFC
Delegated to: BPF
Series: static branches

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for s390x-gcc / test
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-15 success Logs for x86_64-gcc / test
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-llvm-17 / test
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-llvm-18 / test
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-llvm-18 / veristat
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success SINGLE THREAD; Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 1817 this patch: 1819
netdev/build_tools success Errors and warnings before: 2 this patch: 0
netdev/cc_maintainers success CCed 0 of 0 maintainers
netdev/build_clang fail Errors and warnings before: 155 this patch: 157
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 1871 this patch: 1873
netdev/checkpatch warning WARNING: line length of 107 exceeds 80 columns; WARNING: line length of 83 exceeds 80 columns; WARNING: line length of 87 exceeds 80 columns; WARNING: line length of 98 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 1 this patch: 1
netdev/source_inline success Was 0 now: 0

Commit Message

Anton Protopopov Jan. 22, 2024, 4:49 p.m. UTC
Add a new bpf syscall command, BPF_STATIC_BRANCH_UPDATE, which allows
users to update static branches in BPF programs. The command is invoked as

    bpf(BPF_STATIC_BRANCH_UPDATE, attrs={prog_fd, insn_off, on})

where prog_fd points to a BPF program, insn_off is an _xlated_ offset in
this program, and on is a boolean value that sets the branch on or off.
The instruction at insn_off must be a JA with SRC_REG or'ed with
BPF_STATIC_BRANCH_JA and, optionally, with BPF_STATIC_BRANCH_NOP (the
flag is absent in the inverse, gotol_or_nop, form; see
parse_static_branch_insn() below).
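
From userspace the command can be issued directly with the bpf(2)
syscall. A minimal sketch (assuming uapi headers updated by this patch;
the helper name is illustrative):

    #include <string.h>
    #include <unistd.h>
    #include <stdbool.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* toggle the static branch at the given xlated offset */
    static int static_branch_update(int prog_fd, __u32 insn_off, bool on)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.static_branch.prog_fd = prog_fd;
            attr.static_branch.insn_off = insn_off;
            attr.static_branch.on = on;

            return syscall(__NR_bpf, BPF_STATIC_BRANCH_UPDATE,
                           &attr, sizeof(attr));
    }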

To implement this for a particular architecture, override the weak
bpf_arch_poke_static_branch() function in the corresponding bpf_jit_comp.c.

An example of usage can be found below. Let's compile and load the
following [dummy] program:

    SEC("kprobe/__x64_sys_getpgid")
    int worker(void *ctx)
    {
            if (bpf_static_branch_unlikely(&key))
                    return 1;
            else
                    return 0;
    }

Here key is some map and bpf_static_branch_unlikely() is defined as
follows:

    static __always_inline int __bpf_static_branch_nop(void *static_key)
    {
            asm goto("1:\n\t"
                    "nop_or_gotol %l[l_yes]\n\t"
                    ".pushsection .jump_table, \"aw\"\n\t"
                    ".balign 8\n\t"
                    ".long 1b - .\n\t"
                    ".long %l[l_yes] - .\n\t"
                    ".quad %c0 - .\n\t"
                    ".popsection\n\t"
                    :: "i" (static_key)
                    :: l_yes);
            return 0;
    l_yes:
            return 1;
    }

    #define bpf_static_branch_unlikely(static_key) \
            unlikely(__bpf_static_branch_nop(static_key))

Here the extra .jump_table code is needed to automate the search for
the static branch location; the main part is the use of asm goto plus
the nop_or_gotol instruction.
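
For reference, each .jump_table entry emitted by the asm above consists
of three self-relative offsets; a loader could describe one with a
struct along these lines (illustrative only, not part of this patch):

    /* all fields are offsets relative to the field itself */
    struct bpf_jump_entry {
            __s32 insn_off;         /* ".long 1b - ."        */
            __s32 target_off;       /* ".long %l[l_yes] - ." */
            __s64 key_off;          /* ".quad %c0 - ."       */
    };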

After compilation and loading, the xlated program looks like this:

    # bpftool prog dump x id 42
    int worker(void * ctx):
       0: (b7) r0 = 1
       1: (06) nop_or_gotol pc+1
       2: (b7) r0 = 0
       3: (95) exit

In the jitted program the nop_or_gotol instruction (at jitted offset
0x10) is translated to a NOP, as the branch is not activated by default:

    # bpftool prog dump j id 42
    int worker(void * ctx):
       0:   nopl    (%rax,%rax)
       5:   nop
       7:   pushq   %rbp
       8:   movq    %rsp, %rbp
       b:   movl    $1, %eax
    ; asm goto("1:\n\t"
      10:   nop
      12:   xorl    %eax, %eax
      14:   leave
      15:   jmp     0xffffffffcbc16ed8

If we issue a

    bpf(BPF_STATIC_BRANCH_UPDATE, {bpf_prog_get_fd_by_id(42), .insn_off=1, .on=1})

syscall (xlated offset = 1, on = 1), then the jitted code will change
to

    # bpftool prog dump j id 42
    int worker(void * ctx):
       0:   nopl    (%rax,%rax)
       5:   nop
       7:   pushq   %rbp
       8:   movq    %rsp, %rbp
       b:   movl    $1, %eax
    ; asm goto("1:\n\t"
      10:   jmp     0x14
      12:   xorl    %eax, %eax
      14:   leave
      15:   jmp     0xffffffffcbc16ed8

as expected.
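
Under the hood, bpf_arch_poke_static_branch() (see below) uses
text_poke_bp() to switch between a NOP and a relative jump of the same
length, chosen by the length the JIT originally emitted. For reference,
the x86 byte patterns involved are:

    /*
     * len == 2:  66 90            2-byte NOP
     *       <->  EB <rel8>        short jmp
     * len == 5:  0f 1f 44 00 00   5-byte NOP
     *       <->  E9 <rel32>       near jmp
     */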

A "likely" variant can be implemented using the 'gotol_or_nop'
instruction.
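
A sketch of such a variant, mirroring the helper above (assuming the
same .jump_table conventions; the wrapper's polarity is inverted
because gotol_or_nop branches by default):

    static __always_inline int __bpf_static_branch_jump(void *static_key)
    {
            asm goto("1:\n\t"
                    "gotol_or_nop %l[l_yes]\n\t"
                    ".pushsection .jump_table, \"aw\"\n\t"
                    ".balign 8\n\t"
                    ".long 1b - .\n\t"
                    ".long %l[l_yes] - .\n\t"
                    ".quad %c0 - .\n\t"
                    ".popsection\n\t"
                    :: "i" (static_key)
                    :: l_yes);
            return 0;
    l_yes:
            return 1;
    }

    #define bpf_static_branch_likely(static_key) \
            likely(!__bpf_static_branch_jump(static_key))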

Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
---
 arch/x86/net/bpf_jit_comp.c | 39 +++++++++++++++++++++++++
 include/linux/bpf.h         |  2 ++
 include/linux/filter.h      |  1 +
 include/uapi/linux/bpf.h    |  7 +++++
 kernel/bpf/core.c           |  5 ++++
 kernel/bpf/syscall.c        | 57 +++++++++++++++++++++++++++++++++++++
 6 files changed, 111 insertions(+)

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 52b9de134ab3..c757e4d997a7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2098,8 +2098,16 @@  st:			if (is_imm8(insn->off))
 				if (bpf_prog->aux->func_idx)
 					off += bpf_prog->aux->func_info[bpf_prog->aux->func_idx].insn_off;
 
+				bpf_prog->aux->xlated_to_jit[off].ip = image + proglen;
 				bpf_prog->aux->xlated_to_jit[off].off = proglen;
 				bpf_prog->aux->xlated_to_jit[off].len = ilen;
+
+				/*
+				 * save the offset so that it can later be accessed
+				 * by the bpf(BPF_STATIC_BRANCH_UPDATE) syscall
+				 */
+				if (insn->code == (BPF_JMP | BPF_JA) || insn->code == (BPF_JMP32 | BPF_JA))
+					bpf_prog->aux->xlated_to_jit[off].jmp_offset = jmp_offset;
 			}
 		}
 		proglen += ilen;
@@ -3275,3 +3283,34 @@  bool bpf_jit_supports_ptr_xchg(void)
 {
 	return true;
 }
+
+int bpf_arch_poke_static_branch(struct bpf_prog *prog,
+				u32 insn_off,
+				bool on)
+{
+	int jmp_offset = prog->aux->xlated_to_jit[insn_off].jmp_offset;
+	u32 len = prog->aux->xlated_to_jit[insn_off].len;
+	u8 op[5];
+
+	if (is_imm8(jmp_offset) && len != 2)
+		return -EINVAL;
+
+	if (!is_imm8(jmp_offset) && len != 5)
+		return -EINVAL;
+
+	if (on) {
+		if (len == 2) {
+			op[0] = 0xEB;
+			op[1] = jmp_offset;
+		} else {
+			op[0] = 0xE9;
+			memcpy(&op[1], &jmp_offset, 4);
+		}
+	} else {
+		memcpy(op, x86_nops[len], len);
+	}
+
+	text_poke_bp(prog->aux->xlated_to_jit[insn_off].ip, op, len, NULL);
+
+	return 0;
+}
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 660df06cb541..ba77e0c6f390 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1524,8 +1524,10 @@  struct bpf_prog_aux {
 	 * instructions, if allocated
 	 */
 	struct {
+		void *ip;	/* the address of the jitted insn */
 		u32 off;	/* local offset in the jitted code */
 		u32 len;	/* the total len of generated jit code */
+		u32 jmp_offset;	/* jitted jump offset for BPF_JA insns */
 	} *xlated_to_jit;
 };
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 35f067fd3840..ff76a60cf247 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -957,6 +957,7 @@  bool bpf_jit_supports_far_kfunc_call(void);
 bool bpf_jit_supports_exceptions(void);
 bool bpf_jit_supports_ptr_xchg(void);
 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
+int bpf_arch_poke_static_branch(struct bpf_prog *prog, u32 off, bool on);
 bool bpf_helper_changes_pkt_data(void *func);
 
 static inline bool bpf_dump_raw_ok(const struct cred *cred)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 43ad332ffbee..e5d226838a3d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -901,6 +901,7 @@  enum bpf_cmd {
 	BPF_ITER_CREATE,
 	BPF_LINK_DETACH,
 	BPF_PROG_BIND_MAP,
+	BPF_STATIC_BRANCH_UPDATE,
 };
 
 enum bpf_map_type {
@@ -1724,6 +1725,12 @@  union bpf_attr {
 		__u32		flags;		/* extra flags */
 	} prog_bind_map;
 
+	struct { /* struct used by BPF_STATIC_BRANCH_UPDATE command */
+		__u32		prog_fd;
+		__u32		insn_off;
+		__u32		on;
+	} static_branch;
+
 } __attribute__((aligned(8)));
 
 /* The description below is an attempt at providing documentation to eBPF
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index e502485c757a..5272879449d8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -3043,6 +3043,11 @@  static int __init bpf_global_ma_init(void)
 late_initcall(bpf_global_ma_init);
 #endif
 
+int __weak bpf_arch_poke_static_branch(struct bpf_prog *prog, u32 off, bool on)
+{
+	return -EOPNOTSUPP;
+}
+
 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 EXPORT_SYMBOL(bpf_stats_enabled_key);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 97b0ba6ecf65..c3509e59f82d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1504,6 +1504,60 @@  static int map_lookup_elem(union bpf_attr *attr)
 	return err;
 }
 
+static int parse_static_branch_insn(struct bpf_insn *insn, bool *inverse)
+{
+	__u8 code = insn->code;
+
+	if (code != (BPF_JMP | BPF_JA) && code != (BPF_JMP32 | BPF_JA))
+		return -EINVAL;
+
+	if (insn->src_reg & ~BPF_STATIC_BRANCH_MASK)
+		return -EINVAL;
+
+	if (!(insn->src_reg & BPF_STATIC_BRANCH_JA))
+		return -EINVAL;
+
+	if (insn->dst_reg)
+		return -EINVAL;
+
+	*inverse = !(insn->src_reg & BPF_STATIC_BRANCH_NOP);
+
+	return 0;
+}
+
+#define BPF_STATIC_BRANCH_UPDATE_LAST_FIELD static_branch.on
+
+static int bpf_static_branch_update(union bpf_attr *attr)
+{
+	bool on = attr->static_branch.on & 1;
+	struct bpf_prog *prog;
+	u32 insn_off;
+	bool inverse;
+	int ret;
+
+	if (CHECK_ATTR(BPF_STATIC_BRANCH_UPDATE))
+		return -EINVAL;
+
+	prog = bpf_prog_get(attr->static_branch.prog_fd);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	insn_off = attr->static_branch.insn_off;
+	if (insn_off >= prog->len) {
+		ret = -ERANGE;
+		goto put_prog;
+	}
+
+	ret = parse_static_branch_insn(&prog->insnsi[insn_off], &inverse);
+	if (ret)
+		goto put_prog;
+
+	ret = bpf_arch_poke_static_branch(prog, insn_off, on ^ inverse);
+
+put_prog:
+	bpf_prog_put(prog);
+	return ret;
+}
 
 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 
@@ -5578,6 +5632,9 @@  static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
 	case BPF_MAP_DELETE_BATCH:
 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
 		break;
+	case BPF_STATIC_BRANCH_UPDATE:
+		err = bpf_static_branch_update(&attr);
+		break;
 	case BPF_LINK_CREATE:
 		err = link_create(&attr, uattr);
 		break;