Message ID | d4a44c52712468b805cbf5c244b3c9ba0f802ab8.1633104510.git.naveen.n.rao@linux.vnet.ibm.com (mailing list archive) |
---|---
State | Superseded |
Delegated to | BPF
Series | powerpc/bpf: Various fixes
Context | Check | Description |
---|---|---
netdev/tree_selection | success | Not a local patch |
bpf/vmtest-bpf | success | VM_Test |
bpf/vmtest-bpf-PR | success | PR summary |
bpf/vmtest-bpf-next-PR | success | PR summary |
bpf/vmtest-bpf-next | success | VM_Test |
On Fri, Oct 1, 2021 at 2:16 PM Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> wrote:
>
> Add checks to ensure that we never emit branch instructions with
> truncated branch offsets.
>
> Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Acked-by: Song Liu <songliubraving@fb.com>

> ---
>  arch/powerpc/net/bpf_jit.h        | 26 ++++++++++++++++++++------
>  arch/powerpc/net/bpf_jit_comp.c   |  6 +++++-
>  arch/powerpc/net/bpf_jit_comp32.c |  8 ++++++--
>  arch/powerpc/net/bpf_jit_comp64.c |  8 ++++++--
>  4 files changed, 37 insertions(+), 11 deletions(-)
>
> [... full patch quoted; identical to the diff shown below ...]
On Fri, Oct 1, 2021 at 11:15 PM Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> wrote:
>
> Add checks to ensure that we never emit branch instructions with
> truncated branch offsets.
>
> Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>

Acked-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Tested-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>

> ---
>  arch/powerpc/net/bpf_jit.h        | 26 ++++++++++++++++++++------
>  arch/powerpc/net/bpf_jit_comp.c   |  6 +++++-
>  arch/powerpc/net/bpf_jit_comp32.c |  8 ++++++--
>  arch/powerpc/net/bpf_jit_comp64.c |  8 ++++++--
>  4 files changed, 37 insertions(+), 11 deletions(-)
>
> [... full patch quoted; identical to the diff shown below ...]
On 01/10/2021 at 23:14, Naveen N. Rao wrote:
> Add checks to ensure that we never emit branch instructions with
> truncated branch offsets.
>
> Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
> Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
> ---
> [...]
> +#define PPC_JMP(dest) \
> +	do { \
> +		long offset = (long)(dest) - (ctx->idx * 4); \
> +		if (!is_offset_in_branch_range(offset)) { \
> +			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \

Does it really deserve a KERN_ERR? Isn't that something that can be
triggered by a userland request?

> +			return -ERANGE; \
> +		} \
> +		EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
> +	} while (0)
> [...]
> +		if (!is_offset_in_cond_branch_range(offset)) { \
> +			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \

Same comment here.

> +			return -ERANGE; \
> +		} \
> +		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
> +	} while (0)
> [...]
Christophe Leroy wrote:
> On 01/10/2021 at 23:14, Naveen N. Rao wrote:
>> Add checks to ensure that we never emit branch instructions with
>> truncated branch offsets.
>>
>> [...]
>> +			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
>
> Does it really deserve a KERN_ERR?

The intent is to ensure that we handle this when JIT'ing the BPF
instruction. One of the subsequent patches fixes the only scenario where
we can hit this today. In practice, we should never hit this, and if we
do, it is a bug in the JIT.

> Isn't that something that can be triggered by a userland request?

This can't be triggered by unprivileged BPF programs, since those are
limited to 4096 BPF instructions. You need root privileges to load BPF
programs large enough to trigger out-of-range branches.

- Naveen
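For context on the ranges being checked: a powerpc unconditional branch (b)
encodes a 26-bit signed, word-aligned byte displacement (roughly +/-32 MB),
while a conditional branch (bc) encodes only a 16-bit signed displacement
(roughly +/-32 KB). A minimal sketch of the two predicates the new macros
rely on, assuming definitions along the lines of the kernel's
arch/powerpc/include/asm/code-patching.h at the time, is:

#include <stdbool.h>

/* Sketch of the range predicates used by PPC_JMP()/PPC_BCC_SHORT(). */
static inline bool is_offset_in_branch_range(long offset)
{
	/* 'b': 24-bit LI field shifted left by 2 -> -0x2000000 .. 0x1fffffc */
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

static inline bool is_offset_in_cond_branch_range(long offset)
{
	/* 'bc': 14-bit BD field shifted left by 2 -> -0x8000 .. 0x7ffc */
	return (offset >= -0x8000 && offset <= 0x7ffc && !(offset & 0x3));
}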
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 935ea95b66359e..7e9b978b768ed9 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -24,16 +24,30 @@
 #define EMIT(instr)	PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)	EMIT(PPC_INST_BRANCH | \
-			     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest) \
+	do { \
+		long offset = (long)(dest) - (ctx->idx * 4); \
+		if (!is_offset_in_branch_range(offset)) { \
+			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+			return -ERANGE; \
+		} \
+		EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
+	} while (0)
+
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)	EMIT(PPC_INST_BL | \
 				     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND | \
-					     (((cond) & 0x3ff) << 16) | \
-					     (((dest) - (ctx->idx * 4)) & \
-					      0xfffc))
+#define PPC_BCC_SHORT(cond, dest) \
+	do { \
+		long offset = (long)(dest) - (ctx->idx * 4); \
+		if (!is_offset_in_cond_branch_range(offset)) { \
+			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+			return -ERANGE; \
+		} \
+		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+	} while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)	do { \
 		if ((int)(uintptr_t)(i) >= -32768 && \
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 53aefee3fe70be..fcbf7a917c566e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -210,7 +210,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	/* Now build the prologue, body code & epilogue for real. */
 	cgctx.idx = 0;
 	bpf_jit_build_prologue(code_base, &cgctx);
-	bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+	if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+		bpf_jit_binary_free(bpf_hdr);
+		fp = org_fp;
+		goto out_addrs;
+	}
 	bpf_jit_build_epilogue(code_base, &cgctx);
 
 	if (bpf_jit_enable > 1)
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index beb12cbc8c2994..a74d52204f8da2 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -200,7 +200,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
 	}
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
 	/*
 	 * By now, the eBPF program has already setup parameters in r3-r6
@@ -261,7 +261,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	EMIT(PPC_RAW_BCTR());
+
 	/* out: */
+	return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
@@ -1090,7 +1092,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		 */
 		case BPF_JMP | BPF_TAIL_CALL:
 			ctx->seen |= SEEN_TAILCALL;
-			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			if (ret < 0)
+				return ret;
 			break;
 
 		default:
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index b87a63dba9c8fb..f06c62089b1457 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -206,7 +206,7 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
 	EMIT(PPC_RAW_BCTRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
 	/*
 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -267,7 +267,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	EMIT(PPC_RAW_BCTR());
+
 	/* out: */
+	return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
@@ -993,7 +995,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		 */
 		case BPF_JMP | BPF_TAIL_CALL:
 			ctx->seen |= SEEN_TAILCALL;
-			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			if (ret < 0)
+				return ret;
 			break;
 
 		default:
Add checks to ensure that we never emit branch instructions with
truncated branch offsets.

Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/net/bpf_jit.h        | 26 ++++++++++++++++++++------
 arch/powerpc/net/bpf_jit_comp.c   |  6 +++++-
 arch/powerpc/net/bpf_jit_comp32.c |  8 ++++++--
 arch/powerpc/net/bpf_jit_comp64.c |  8 ++++++--
 4 files changed, 37 insertions(+), 11 deletions(-)
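Note that because the reworked PPC_JMP() and PPC_BCC_SHORT() expand to a
statement containing 'return -ERANGE', they can only be used inside
functions that return int; this is why bpf_jit_emit_tail_call() changes
from void to int and why bpf_jit_build_body() failures now abort the JIT.
A sketch of the resulting calling pattern follows; emit_jump_example() and
build_example() are hypothetical illustrations, not functions from the
patch:

/*
 * Hypothetical emitter illustrating the new macro contract:
 * PPC_JMP()/PPC_BCC_SHORT() execute 'return -ERANGE' on an
 * out-of-range target, so any function expanding them must
 * itself return int.
 */
static int emit_jump_example(u32 *image, struct codegen_context *ctx, u32 dest)
{
	PPC_JMP(dest);	/* may 'return -ERANGE' from this function */
	return 0;
}

/* Callers must check and propagate the error, as bpf_jit_build_body() does. */
static int build_example(u32 *image, struct codegen_context *ctx, u32 dest)
{
	int ret;

	ret = emit_jump_example(image, ctx, dest);
	if (ret < 0)
		return ret;
	return 0;
}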