@@ -1976,7 +1976,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
void *orig_call)
{
int ret, i, nr_args = m->nr_args;
- int regs_off, ip_off, args_off, stack_size = nr_args * 8;
+ int regs_off, flags_off, ip_off, args_off, stack_size = nr_args * 8;
struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
@@ -2019,6 +2019,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
stack_size += 8;
args_off = stack_size;
+ if (flags & BPF_TRAMP_F_IP_ARG)
+ stack_size += 8; /* room for flags */
+
+ flags_off = stack_size;
+
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */
@@ -2044,6 +2049,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ /* Store flags
+ * mov rax, flags & BPF_TRAMP_F_IP_ARG
+ * mov QWORD PTR [rbp - flags_off], rax
+ */
+ emit_mov_imm64(&prog, BPF_REG_0, 0,
+ (u32) (flags & BPF_TRAMP_F_IP_ARG));
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -flags_off);
+ }
+
if (flags & BPF_TRAMP_F_IP_ARG) {
/* Store IP address of the traced function:
* mov rax, QWORD PTR [rbp + 8]
@@ -13585,7 +13585,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
if (prog_type == BPF_PROG_TYPE_TRACING &&
insn->imm == BPF_FUNC_get_func_ip) {
/* Load IP address from ctx - 16 */
- insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
+ insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -24);
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
if (!new_prog)
@@ -1009,10 +1009,31 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
.arg5_type = ARG_ANYTHING,
};
+static int get_trampo_var_off(void *ctx, u32 flag)
+{
+ int off = 2; /* All variables are placed before flags */
+ u32 flags = (u32)((u64 *)ctx)[-2];
+
+ if (!(flags & flag))
+ return -1; /* The variable is not there */
+ if (flag & (flag - 1))
+ return -1; /* 2 or more bits are set */
+
+ for (; flags & flag; flags &= flags - 1)
+ off++;
+
+ return off;
+}
+
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
/* This helper call is inlined by verifier. */
- return ((u64 *)ctx)[-2];
+ int off = get_trampo_var_off(ctx, BPF_TRAMP_F_IP_ARG);
+
+ if (off < 0)
+ return 0;
+
+ return ((u64 *)ctx)[-off];
}
static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
The flags word records which values are stored below nargs in the
trampoline frame; it is stored only when at least one such value is
present. The order of the bits, from LSB to MSB, matches the order of
the values in the frame: the LSB corresponds to the highest location,
the one closest to the flags word and nargs.

Signed-off-by: Kui-Feng Lee <kuifeng@fb.com>
---
 arch/x86/net/bpf_jit_comp.c | 17 ++++++++++++++++-
 kernel/bpf/verifier.c       |  2 +-
 kernel/trace/bpf_trace.c    | 23 ++++++++++++++++++++++-
 3 files changed, 39 insertions(+), 3 deletions(-)
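
As an illustration of the lookup that bpf_get_func_ip_tracing() now
performs, here is a minimal user-space sketch of the same offset
calculation. The names frame_var_off() and SAMPLE_F_IP_ARG are
hypothetical stand-ins for get_trampo_var_off() and BPF_TRAMP_F_IP_ARG;
the bit counting mirrors the patch: a value's slot index is 2 (for
nargs and the flags word) plus the number of set bits at or below its
flag bit, so the LSB ends up closest to nargs.

	/* Hypothetical user-space model of get_trampo_var_off(); the
	 * names frame_var_off and SAMPLE_F_IP_ARG are illustrative only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define SAMPLE_F_IP_ARG 0x1U	/* stand-in for BPF_TRAMP_F_IP_ARG */

	static int frame_var_off(uint32_t flags, uint32_t flag)
	{
		int off = 2;	/* slot 1 is nargs, slot 2 is the flags word */

		if (!(flags & flag))
			return -1;	/* the requested value is not in the frame */
		if (flag & (flag - 1))
			return -1;	/* more than one bit queried at once */

		/* Clear set bits from the LSB up until the queried bit is
		 * gone; lower-ordered values sit closer to nargs.
		 */
		for (; flags & flag; flags &= flags - 1)
			off++;

		return off;
	}

	int main(void)
	{
		uint32_t flags = SAMPLE_F_IP_ARG;	/* as read from ctx - 16 */
		int off = frame_var_off(flags, SAMPLE_F_IP_ARG);

		/* With only the IP bit set, off is 3, i.e. ctx - 24, which is
		 * the offset the verifier hunk above hard-codes for the
		 * inlined case.
		 */
		printf("IP would be read from ((u64 *)ctx)[-%d]\n", off);
		return 0;
	}

Counting bits this way means each value's offset depends only on which
lower-ordered values are present, so adding or omitting an optional
value shifts the ones below it without needing a per-combination table.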