| Message ID | 20170404220810.14250-1-tycho@docker.com (mailing list archive) |
|---|---|
| State | New, archived |
On Tue, Apr 4, 2017 at 3:08 PM, Tycho Andersen <tycho@docker.com> wrote:
> The goal of this patch is to protect the JIT against an attacker with a
> write-in-memory primitive. The JIT allocates a buffer which will eventually
> be marked +x, so we need to make sure that what was written to this buffer
> is what was intended.
>
> We achieve this by building a hash of the instruction buffer as
> instructions are emitted and then comparing that to a hash at the end of
> the JIT compile, after the buffer has been marked read-only.
>
> Signed-off-by: Tycho Andersen <tycho@docker.com>
> CC: Daniel Borkmann <daniel@iogearbox.net>
> CC: Alexei Starovoitov <ast@kernel.org>
> CC: Kees Cook <keescook@chromium.org>
> CC: Mickaël Salaün <mic@digikod.net>

Cool! This closes the race condition on producing the JIT vs going
read-only. I wonder if it might be possible to make this a more generic
interface to BPF, which would allocate the hash, provide the update
callback during emit, and then do the hash check itself at the end of
bpf_jit_binary_lock_ro()?

-Kees

> [quoted diff trimmed; the full patch appears below]
Hi Kees,

On Tue, Apr 04, 2017 at 03:17:57PM -0700, Kees Cook wrote:
> On Tue, Apr 4, 2017 at 3:08 PM, Tycho Andersen <tycho@docker.com> wrote:
> > [...]
>
> Cool! This closes the race condition on producing the JIT vs going
> read-only. I wonder if it might be possible to make this a more generic
> interface to BPF, which would allocate the hash, provide the update
> callback during emit, and then do the hash check itself at the end of
> bpf_jit_binary_lock_ro()?

Yes, probably so. I can look into that for the next version.

Tycho
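For illustration, a minimal sketch of the generic interface being discussed might look like the following. None of these names (`struct bpf_jit_hash`, `bpf_jit_hash_update()`, `bpf_jit_binary_lock_ro_checked()`) exist in the kernel; they are assumptions about how the hooks in this patch could be factored out of the x86 JIT into the BPF core.

/* Hypothetical sketch only -- not existing kernel API. The BPF core
 * would own the hash state, arch JITs would feed it during emit, and
 * the final check would run when the image is locked read-only.
 */
#include <crypto/hash.h>
#include <linux/filter.h>

struct bpf_jit_hash {
	struct shash_desc *desc;	/* running hash over emitted bytes */
	u32 nonce;			/* mixed in first, so a matching
					 * digest cannot be precomputed */
};

/* Arch JITs would call this for every byte group written into the
 * image, mirroring what the patched emit_code() does.
 */
static inline void bpf_jit_hash_update(struct bpf_jit_hash *h,
				       const u8 *bytes, unsigned int len)
{
	if (h && h->desc)
		crypto_shash_update(h->desc, bytes, len);
}

/* A lock_ro variant that sets the image read-only, rehashes the final
 * image, and compares it against the running digest. The actual
 * recompute-and-compare (check_jit_hash() in this patch) is elided.
 */
static inline int
bpf_jit_binary_lock_ro_checked(struct bpf_binary_header *hdr, u8 *image,
			       u32 len, struct bpf_jit_hash *h)
{
	bpf_jit_binary_lock_ro(hdr);
	/* recompute nonce + image hash here and BUG() on mismatch */
	return 0;
}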
Hi Tycho,
[auto build test WARNING on net/master]
[also build test WARNING on v4.11-rc5 next-20170405]
[cannot apply to tip/x86/core]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Tycho-Andersen/ebpf-verify-the-output-of-the-JIT/20170406-004746
config: x86_64-allmodconfig (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64
All warnings (new ones prefixed by >>):
arch/x86/net/bpf_jit_comp.c: In function 'do_jit':
>> arch/x86/net/bpf_jit_comp.c:1098:1: warning: the frame size of 13024 bytes is larger than 8192 bytes [-Wframe-larger-than=]
}
^
vim +1098 arch/x86/net/bpf_jit_comp.c
9383191d Daniel Borkmann 2017-02-16 1082 pr_err("bpf_jit: fatal insn size error\n");
e0ee9c12 Alexei Starovoitov 2014-10-10 1083 return -EFAULT;
e0ee9c12 Alexei Starovoitov 2014-10-10 1084 }
e0ee9c12 Alexei Starovoitov 2014-10-10 1085
0a14842f Eric Dumazet 2011-04-20 1086 if (image) {
0a14842f Eric Dumazet 2011-04-20 1087 if (unlikely(proglen + ilen > oldproglen)) {
9383191d Daniel Borkmann 2017-02-16 1088 pr_err("bpf_jit: fatal error\n");
f3c2af7b Alexei Starovoitov 2014-05-13 1089 return -EFAULT;
0a14842f Eric Dumazet 2011-04-20 1090 }
0a14842f Eric Dumazet 2011-04-20 1091 memcpy(image + proglen, temp, ilen);
0a14842f Eric Dumazet 2011-04-20 1092 }
0a14842f Eric Dumazet 2011-04-20 1093 proglen += ilen;
0a14842f Eric Dumazet 2011-04-20 1094 addrs[i] = proglen;
0a14842f Eric Dumazet 2011-04-20 1095 prog = temp;
0a14842f Eric Dumazet 2011-04-20 1096 }
f3c2af7b Alexei Starovoitov 2014-05-13 1097 return proglen;
f3c2af7b Alexei Starovoitov 2014-05-13 @1098 }
f3c2af7b Alexei Starovoitov 2014-05-13 1099
19d23b2d Tycho Andersen 2017-04-04 1100 #ifdef CONFIG_EBPF_JIT_HASH_OUTPUT
19d23b2d Tycho Andersen 2017-04-04 1101 static struct shash_desc *bpf_alloc_hash_desc(void)
19d23b2d Tycho Andersen 2017-04-04 1102 {
19d23b2d Tycho Andersen 2017-04-04 1103 struct shash_desc *hash;
19d23b2d Tycho Andersen 2017-04-04 1104 int sz = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
19d23b2d Tycho Andersen 2017-04-04 1105
19d23b2d Tycho Andersen 2017-04-04 1106 hash = kzalloc(sz, GFP_KERNEL);
:::::: The code at line 1098 was first introduced by commit
:::::: f3c2af7ba17a83809806880062c9ad541744fb95 net: filter: x86: split bpf_jit_compile()
:::::: TO: Alexei Starovoitov <ast@plumgrid.com>
:::::: CC: David S. Miller <davem@davemloft.net>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
On Tue, Apr 04, 2017 at 09:45:36PM -0600, Tycho Andersen wrote:
> Hi Kees,
>
> On Tue, Apr 04, 2017 at 03:17:57PM -0700, Kees Cook wrote:
> > On Tue, Apr 4, 2017 at 3:08 PM, Tycho Andersen <tycho@docker.com> wrote:
> > > [...]
> >
> > Cool! This closes the race condition on producing the JIT vs going
> > read-only. I wonder if it might be possible to make this a more generic
> > interface to BPF, which would allocate the hash, provide the update
> > callback during emit, and then do the hash check itself at the end of
> > bpf_jit_binary_lock_ro()?
>
> Yes, probably so. I can look into that for the next version.

Nack. Please stop wasting yours and our time with buggy code that
pretends to fix a problem that doesn't exist. This security paranoia
around JIT must stop. Make sure that CONFIG_BPF_JIT is off in your
system.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cc98d5a..7b2db2c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2789,6 +2789,17 @@ config X86_DMA_REMAP
 
 source "net/Kconfig"
 
+config EBPF_JIT_HASH_OUTPUT
+	def_bool y
+	depends on HAVE_EBPF_JIT
+	depends on BPF_JIT
+	select CRYPTO_SHA256
+	---help---
+	  Enables a double check of the JIT's output after it is marked read-only to
+	  ensure that it matches what the JIT generated.
+
+	  Note, only applies when /proc/sys/net/core/bpf_jit_harden > 0.
+
 source "drivers/Kconfig"
 
 source "drivers/firmware/Kconfig"
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 32322ce..be1271e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -13,9 +13,15 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <linux/bpf.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
 
 int bpf_jit_enable __read_mostly;
 
+#ifdef CONFIG_EBPF_JIT_HASH_OUTPUT
+struct crypto_shash *tfm __read_mostly;
+#endif
+
 /*
  * assembly code in arch/x86/net/bpf_jit.S
  */
@@ -25,7 +31,8 @@ extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
 extern u8 sk_load_byte_negative_offset[];
 
-static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len,
+		     struct shash_desc *hash)
 {
 	if (len == 1)
 		*ptr = bytes;
@@ -35,11 +42,15 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 		*(u32 *)ptr = bytes;
 		barrier();
 	}
+
+	if (IS_ENABLED(CONFIG_EBPF_JIT_HASH_OUTPUT) && hash)
+		crypto_shash_update(hash, (u8 *) &bytes, len);
+
 	return ptr + len;
 }
 
 #define EMIT(bytes, len) \
-	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
+	do { prog = emit_code(prog, bytes, len, hash); cnt += len; } while (0)
 
 #define EMIT1(b1)		EMIT(b1, 1)
 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
@@ -206,7 +217,7 @@ struct jit_context {
 /* emit x64 prologue code for BPF program and check it's size.
  * bpf_tail_call helper will skip it while jumping into another program
  */
-static void emit_prologue(u8 **pprog)
+static void emit_prologue(u8 **pprog, struct shash_desc *hash)
 {
 	u8 *prog = *pprog;
 	int cnt = 0;
@@ -264,7 +275,7 @@ static void emit_prologue(u8 **pprog)
  *   goto *(prog->bpf_func + prologue_size);
  * out:
  */
-static void emit_bpf_tail_call(u8 **pprog)
+static void emit_bpf_tail_call(u8 **pprog, struct shash_desc *hash)
 {
 	u8 *prog = *pprog;
 	int label1, label2, label3;
@@ -328,7 +339,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 }
 
 
-static void emit_load_skb_data_hlen(u8 **pprog)
+static void emit_load_skb_data_hlen(u8 **pprog, struct shash_desc *hash)
 {
 	u8 *prog = *pprog;
 	int cnt = 0;
@@ -348,7 +359,8 @@ static void emit_load_skb_data_hlen(u8 **pprog)
 }
 
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
-		  int oldproglen, struct jit_context *ctx)
+		  int oldproglen, struct jit_context *ctx,
+		  struct shash_desc *hash)
 {
 	struct bpf_insn *insn = bpf_prog->insnsi;
 	int insn_cnt = bpf_prog->len;
@@ -360,10 +372,10 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 	int proglen = 0;
 	u8 *prog = temp;
 
-	emit_prologue(&prog);
+	emit_prologue(&prog, hash);
 
 	if (seen_ld_abs)
-		emit_load_skb_data_hlen(&prog);
+		emit_load_skb_data_hlen(&prog, hash);
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
@@ -875,7 +887,7 @@ xadd:			if (is_imm8(insn->off))
 			if (seen_ld_abs) {
 				if (reload_skb_data) {
 					EMIT1(0x5F); /* pop %rdi */
-					emit_load_skb_data_hlen(&prog);
+					emit_load_skb_data_hlen(&prog, hash);
 				} else {
 					EMIT2(0x41, 0x59); /* pop %r9 */
 					EMIT2(0x41, 0x5A); /* pop %r10 */
@@ -884,7 +896,7 @@ xadd:			if (is_imm8(insn->off))
 			break;
 
 		case BPF_JMP | BPF_CALL | BPF_X:
-			emit_bpf_tail_call(&prog);
+			emit_bpf_tail_call(&prog, hash);
 			break;
 
 			/* cond jump */
@@ -1085,6 +1097,106 @@ xadd:			if (is_imm8(insn->off))
 	return proglen;
 }
 
+#ifdef CONFIG_EBPF_JIT_HASH_OUTPUT
+static struct shash_desc *bpf_alloc_hash_desc(void)
+{
+	struct shash_desc *hash;
+	int sz = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
+
+	hash = kzalloc(sz, GFP_KERNEL);
+	if (hash)
+		hash->tfm = tfm;
+	return hash;
+}
+
+static int init_hash(struct shash_desc **hash, u32 *nonce)
+{
+	if (!bpf_jit_harden)
+		return 0;
+
+	*nonce = get_random_int();
+
+	if (!tfm) {
+		tfm = crypto_alloc_shash("sha256", 0, 0);
+		if (IS_ERR(tfm))
+			return PTR_ERR(tfm);
+	}
+
+	if (!*hash) {
+		*hash = bpf_alloc_hash_desc();
+		if (!*hash)
+			return -ENOMEM;
+	}
+
+	if (crypto_shash_init(*hash) < 0)
+		return -1;
+
+	return crypto_shash_update(*hash, (u8 *) nonce, sizeof(*nonce));
+}
+
+static bool check_jit_hash(u8 *buf, u32 len, struct shash_desc *out_d,
+			   u32 nonce)
+{
+	struct shash_desc *check_d;
+	void *out, *check;
+	unsigned int sz;
+	bool match = false;
+
+	if (!out_d)
+		return 0;
+
+	BUG_ON(out_d->tfm != tfm);
+
+	sz = crypto_shash_digestsize(out_d->tfm);
+	out = kzalloc(2 * sz, GFP_KERNEL);
+	if (!out)
+		return false;
+
+	if (crypto_shash_final(out_d, out) < 0) {
+		kfree(out);
+		return false;
+	}
+
+	check_d = bpf_alloc_hash_desc();
+	if (!check_d) {
+		kfree(out);
+		return false;
+	}
+
+	if (crypto_shash_init(check_d) < 0)
+		goto out;
+
+	if (crypto_shash_update(check_d, (u8 *) &nonce, sizeof(nonce)) < 0)
+		goto out;
+
+	if (crypto_shash_update(check_d, buf, len) < 0)
+		goto out;
+
+	check = out + sz;
+	if (crypto_shash_final(check_d, check) < 0)
+		goto out;
+
+	if (!memcmp(out, check, sz))
+		match = true;
+
+out:
+	kfree(out);
+	kfree(check_d);
+	return match;
+}
+#else
+static int init_hash(struct shash_desc **hash, u32 *nonce)
+{
+	return 0;
+}
+
+static bool check_jit_hash(u8 *buf, u32 len, struct shash_desc *out_d,
+			   u32 nonce)
+{
+	return true;
+}
+#endif
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
@@ -1096,6 +1208,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	int *addrs;
 	int pass;
 	int i;
+	struct shash_desc *hash = NULL;
+	u32 nonce;
 
 	if (!bpf_jit_enable)
 		return orig_prog;
@@ -1132,7 +1246,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	 * pass to emit the final image
 	 */
 	for (pass = 0; pass < 10 || image; pass++) {
-		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+		if (init_hash(&hash, &nonce) < 0) {
+			image = NULL;
+			if (header)
+				bpf_jit_binary_free(header);
+			prog = orig_prog;
+			goto out_addrs;
+		}
+
+		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, hash);
 		if (proglen <= 0) {
 			image = NULL;
 			if (header)
@@ -1166,6 +1288,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
 		bpf_jit_binary_lock_ro(header);
+		if (!check_jit_hash(image, proglen, hash, nonce))
+			BUG();
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 	} else {
@@ -1174,6 +1298,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 out_addrs:
 	kfree(addrs);
+	kfree(hash);
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
The goal of this patch is to protect the JIT against an attacker with a
write-in-memory primitive. The JIT allocates a buffer which will eventually
be marked +x, so we need to make sure that what was written to this buffer
is what was intended.

We achieve this by building a hash of the instruction buffer as
instructions are emitted and then comparing that to a hash at the end of
the JIT compile, after the buffer has been marked read-only.

Signed-off-by: Tycho Andersen <tycho@docker.com>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Alexei Starovoitov <ast@kernel.org>
CC: Kees Cook <keescook@chromium.org>
CC: Mickaël Salaün <mic@digikod.net>
---
 arch/x86/Kconfig            |  11 ++++
 arch/x86/net/bpf_jit_comp.c | 147 ++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 147 insertions(+), 11 deletions(-)
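To see the shape of the emit-hash-then-verify flow outside the kernel, here is a small self-contained userspace model. FNV-1a stands in for SHA-256 purely to keep the example dependency-free, and the nonce value and instruction bytes are illustrative, not taken from the patch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t fnv1a_update(uint64_t h, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--)
		h = (h ^ *p++) * 0x100000001b3ULL;
	return h;
}

int main(void)
{
	uint8_t image[64];
	size_t proglen = 0;
	uint32_t nonce = 0xdeadbeef;	/* the patch uses get_random_int() */
	uint64_t running = 0xcbf29ce484222325ULL;
	uint64_t check = 0xcbf29ce484222325ULL;
	/* a few sample machine-code bytes standing in for JIT output */
	const uint8_t insns[] = { 0x55, 0x48, 0x89, 0xe5, 0xc3 };
	size_t i;

	/* "emit" phase: fold the nonce in first, then hash every byte as
	 * it is written, as the patched emit_code() does. */
	running = fnv1a_update(running, &nonce, sizeof(nonce));
	for (i = 0; i < sizeof(insns); i++) {
		image[proglen++] = insns[i];
		running = fnv1a_update(running, &insns[i], 1);
	}

	/* ... the buffer would be marked read-only at this point ... */

	/* verify phase: rehash nonce + final buffer and compare, as
	 * check_jit_hash() does after bpf_jit_binary_lock_ro(). */
	check = fnv1a_update(check, &nonce, sizeof(nonce));
	check = fnv1a_update(check, image, proglen);

	if (check != running) {
		fprintf(stderr, "JIT image was tampered with!\n");
		abort();	/* the patch calls BUG() here */
	}
	printf("hashes match: %#llx\n", (unsigned long long)check);
	return 0;
}

The nonce is hashed before any instruction bytes so that an attacker who can write memory cannot simply precompute a digest that matches a tampered image; they would have to recompute it per-compile, inside the window the patch is trying to close.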