Message ID | 883c5a268483a89ab13ed630210328a926f16e5b.1708526584.git.christophe.leroy@csgroup.eu (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | BPF |
Series | [bpf-next,v2] bpf: Check return from set_memory_rox() and friends |
On 2/21/24 3:45 PM, Christophe Leroy wrote:
> arch_protect_bpf_trampoline() and alloc_new_pack() call
> set_memory_rox() which can fail, leading to unprotected memory.
>
> Take into account return from set_memory_XX() functions and add
> __must_check flag to arch_protect_bpf_trampoline().
>
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Reviewed-by: Kees Cook <keescook@chromium.org>
> ---
> v2:
> - Move list_add_tail(&pack->list, &pack_list) at the end of alloc_new_pack()
> - Split 2 lines that are reported longer than 80 chars by BPF patchwork's checkpatch report.
> ---
>  arch/x86/net/bpf_jit_comp.c    |  6 ++++--
>  include/linux/bpf.h            |  4 ++--
>  kernel/bpf/bpf_struct_ops.c    |  9 +++++++--
>  kernel/bpf/core.c              | 29 ++++++++++++++++++++++-------
>  kernel/bpf/trampoline.c        | 18 ++++++++++++------
>  net/bpf/bpf_dummy_struct_ops.c |  4 +++-
>  6 files changed, 50 insertions(+), 20 deletions(-)
>
[...]
> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> index d382f5ebe06c..6e64ac9083b6 100644
> --- a/kernel/bpf/trampoline.c
> +++ b/kernel/bpf/trampoline.c
[...]
> @@ -1072,17 +1074,21 @@ void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
>  	bpf_jit_free_exec(image);
>  }
>
> -void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
> +int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)

nit: Should we add __must_check as well here?

>  {
>  	WARN_ON_ONCE(size > PAGE_SIZE);
> -	set_memory_rox((long)image, 1);
> +	return set_memory_rox((long)image, 1);
>  }
>
> -void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
> +int __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
>  {
> +	int err;
>  	WARN_ON_ONCE(size > PAGE_SIZE);
> -	set_memory_nx((long)image, 1);
> -	set_memory_rw((long)image, 1);
> +
> +	err = set_memory_nx((long)image, 1);
> +	if (err)
> +		return err;
> +	return set_memory_rw((long)image, 1);
>  }

Do we still need this? It looks like this does not have an in-tree user
anymore.
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c5b461dda438..132c8ffba109 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -2180,10 +2180,6 @@ void arch_protect_bpf_trampoline(void *image, unsigned int size)
 {
 }
 
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
 				void *ro_image_end, const struct btf_func_model *m,
 				u32 flags, struct bpf_tramp_links *tlinks,
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a7ba8e178645..7a56d2d84512 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -3008,10 +3008,6 @@ void arch_protect_bpf_trampoline(void *image, unsigned int size)
 {
 }
 
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
-{
-}
-
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_links *tlinks,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4f20f62f9d63..d89bdefb42e2 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1117,7 +1117,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 void *arch_alloc_bpf_trampoline(unsigned int size);
 void arch_free_bpf_trampoline(void *image, unsigned int size);
 void arch_protect_bpf_trampoline(void *image, unsigned int size);
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 			     struct bpf_tramp_links *tlinks, void *func_addr);
 
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index db7599c59c78..04fd1abd3661 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -1078,13 +1078,6 @@ void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
 	set_memory_rox((long)image, 1);
 }
 
-void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
-{
-	WARN_ON_ONCE(size > PAGE_SIZE);
-	set_memory_nx((long)image, 1);
-	set_memory_rw((long)image, 1);
-}
-
 int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 				    struct bpf_tramp_links *tlinks, void *func_addr)
 {
On 15/03/2024 at 15:34, Daniel Borkmann wrote:
> On 2/21/24 3:45 PM, Christophe Leroy wrote:
>> arch_protect_bpf_trampoline() and alloc_new_pack() call
>> set_memory_rox() which can fail, leading to unprotected memory.
>>
>> Take into account return from set_memory_XX() functions and add
>> __must_check flag to arch_protect_bpf_trampoline().
>>
>> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
>> Reviewed-by: Kees Cook <keescook@chromium.org>
[...]
>> -void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
>> +int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
>
> nit: Should we add __must_check as well here?

Don't think so. Looks like only prototypes get the __must_check. See for
instance device_create_bin_file(): there's a __must_check on the
prototype in include/linux/device.h but not on the definition in
drivers/base/core.c.

>>  {
>>  	WARN_ON_ONCE(size > PAGE_SIZE);
>> -	set_memory_rox((long)image, 1);
>> +	return set_memory_rox((long)image, 1);
>>  }
>>
>> -void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
>> +int __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
>>  {
>> +	int err;
>>  	WARN_ON_ONCE(size > PAGE_SIZE);
>> -	set_memory_nx((long)image, 1);
>> -	set_memory_rw((long)image, 1);
>> +
>> +	err = set_memory_nx((long)image, 1);
>> +	if (err)
>> +		return err;
>> +	return set_memory_rw((long)image, 1);
>>  }
>
> Do we still need this? It looks like this does not have an in-tree user
> anymore.

Looks like the last user went away with commit 187e2af05abe ("bpf:
struct_ops supports more than one page for trampolines.") but I'm having
a hard time figuring out whether it's valid or not.

But as there is no user anymore it can surely go away. Will you drop it,
or do you want a proper patch from me?

Christophe
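Condensed, the split Christophe describes looks like this. It is a minimal sketch built from the hunks in this very patch (the file placement in the comments is the existing one); it is meant only to illustrate where the annotation lives, not to restate the patch:

/* include/linux/bpf.h -- the prototype carries __must_check, so every
 * translation unit that includes the header gets -Wunused-result when a
 * caller drops the return value.
 */
int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);

/* kernel/bpf/trampoline.c -- the (weak) definition stays plain; callers
 * only ever see the annotated prototype from the header, so repeating
 * the attribute here buys nothing.
 */
int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
{
	WARN_ON_ONCE(size > PAGE_SIZE);
	return set_memory_rox((long)image, 1);
}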
On 3/15/24 3:55 PM, Christophe Leroy wrote:
[...]
>>>  {
>>>  	WARN_ON_ONCE(size > PAGE_SIZE);
>>> -	set_memory_rox((long)image, 1);
>>> +	return set_memory_rox((long)image, 1);
>>>  }
>>>
>>> -void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
>>> +int __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
>>>  {
>>> +	int err;
>>>  	WARN_ON_ONCE(size > PAGE_SIZE);
>>> -	set_memory_nx((long)image, 1);
>>> -	set_memory_rw((long)image, 1);
>>> +
>>> +	err = set_memory_nx((long)image, 1);
>>> +	if (err)
>>> +		return err;
>>> +	return set_memory_rw((long)image, 1);
>>>  }
>>
>> Do we still need this? It looks like this does not have an in-tree user
>> anymore.
>
> Looks like the last user went away with commit 187e2af05abe ("bpf:
> struct_ops supports more than one page for trampolines.") but I'm having
> a hard time figuring out whether it's valid or not.
>
> But as there is no user anymore it can surely go away. Will you drop it,
> or do you want a proper patch from me?

My understanding is that VM_FLUSH_RESET_PERMS would take care of this
via arch_alloc_bpf_trampoline(). Anyway, given there is a merge conflict
with this patch, pls include it with a v3.

Thanks,
Daniel
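For context on the VM_FLUSH_RESET_PERMS point, here is a rough sketch of the generic alloc/free pair. The bodies are approximated from kernel/bpf/trampoline.c of that period, so treat the exact shape as an assumption; the point is only where the flag is set and why an explicit unprotect before freeing is unnecessary:

/* Approximate generic helpers (not copied verbatim from the tree).
 * set_vm_flush_reset_perms() tags the vmalloc area with
 * VM_FLUSH_RESET_PERMS, so the vfree() path reached through
 * bpf_jit_free_exec() resets the direct-map permissions and flushes
 * the TLB itself -- no arch_unprotect_bpf_trampoline() call is needed
 * before freeing.
 */
void *__weak arch_alloc_bpf_trampoline(unsigned int size)
{
	void *image;

	if (WARN_ON_ONCE(size > PAGE_SIZE))
		return NULL;
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (image)
		set_vm_flush_reset_perms(image);
	return image;
}

void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
{
	bpf_jit_free_exec(image);	/* vfree() handles the W^X reset */
}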
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e1390d1e331b..128c8ec9164e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2780,12 +2780,14 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
 	bpf_prog_pack_free(image, size);
 }
 
-void arch_protect_bpf_trampoline(void *image, unsigned int size)
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
 {
+	return 0;
 }
 
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int arch_unprotect_bpf_trampoline(void *image, unsigned int size)
 {
+	return 0;
 }
 
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b86bd15a051d..bb2723c264df 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1116,8 +1116,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 				void *func_addr);
 void *arch_alloc_bpf_trampoline(unsigned int size);
 void arch_free_bpf_trampoline(void *image, unsigned int size);
-void arch_protect_bpf_trampoline(void *image, unsigned int size);
-void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
+int arch_unprotect_bpf_trampoline(void *image, unsigned int size);
 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 			     struct bpf_tramp_links *tlinks, void *func_addr);
 
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 0decd862dfe0..d920afb0dd60 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -488,7 +488,9 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 			if (err)
 				goto reset_unlock;
 		}
-		arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+		err = arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+		if (err)
+			goto reset_unlock;
 		/* Let bpf_link handle registration & unregistration.
 		 *
 		 * Pair with smp_load_acquire() during lookup_elem().
@@ -497,7 +499,10 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 		goto unlock;
 	}
 
-	arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+	err = arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
+	if (err)
+		goto reset_unlock;
+
 	err = st_ops->reg(kdata);
 	if (likely(!err)) {
 		/* This refcnt increment on the map here after
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c49619ef55d0..eb2256ba6daf 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -898,23 +898,31 @@ static LIST_HEAD(pack_list);
 static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
 {
 	struct bpf_prog_pack *pack;
+	int err;
 
 	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
 		       GFP_KERNEL);
 	if (!pack)
 		return NULL;
 	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
-	if (!pack->ptr) {
-		kfree(pack);
-		return NULL;
-	}
+	if (!pack->ptr)
+		goto out;
 	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
 	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
-	list_add_tail(&pack->list, &pack_list);
 
 	set_vm_flush_reset_perms(pack->ptr);
-	set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	err = set_memory_rox((unsigned long)pack->ptr,
+			     BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	if (err)
+		goto out_free;
+	list_add_tail(&pack->list, &pack_list);
 	return pack;
+
+out_free:
+	bpf_jit_free_exec(pack->ptr);
+out:
+	kfree(pack);
+	return NULL;
 }
 
 void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
@@ -929,9 +937,16 @@ void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
 		size = round_up(size, PAGE_SIZE);
 		ptr = bpf_jit_alloc_exec(size);
 		if (ptr) {
+			int err;
+
 			bpf_fill_ill_insns(ptr, size);
 			set_vm_flush_reset_perms(ptr);
-			set_memory_rox((unsigned long)ptr, size / PAGE_SIZE);
+			err = set_memory_rox((unsigned long)ptr,
+					     size / PAGE_SIZE);
+			if (err) {
+				bpf_jit_free_exec(ptr);
+				ptr = NULL;
+			}
 		}
 		goto out;
 	}
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index d382f5ebe06c..6e64ac9083b6 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -456,7 +456,9 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
 	if (err < 0)
 		goto out_free;
 
-	arch_protect_bpf_trampoline(im->image, im->size);
+	err = arch_protect_bpf_trampoline(im->image, im->size);
+	if (err)
+		goto out_free;
 
 	WARN_ON(tr->cur_image && total == 0);
 	if (tr->cur_image)
@@ -1072,17 +1074,21 @@ void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
 	bpf_jit_free_exec(image);
 }
 
-void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
+int __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
 {
 	WARN_ON_ONCE(size > PAGE_SIZE);
-	set_memory_rox((long)image, 1);
+	return set_memory_rox((long)image, 1);
 }
 
-void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+int __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
 {
+	int err;
 	WARN_ON_ONCE(size > PAGE_SIZE);
-	set_memory_nx((long)image, 1);
-	set_memory_rw((long)image, 1);
+
+	err = set_memory_nx((long)image, 1);
+	if (err)
+		return err;
+	return set_memory_rw((long)image, 1);
 }
 
 int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 02de71719aed..2aaecd8931fc 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -137,7 +137,9 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (err < 0)
 		goto out;
 
-	arch_protect_bpf_trampoline(image, PAGE_SIZE);
+	err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
+	if (err)
+		goto out;
 	prog_ret = dummy_ops_call_op(image, args);
 
 	err = dummy_ops_copy_args(args);
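As a quick illustration of what the __must_check annotation in the include/linux/bpf.h hunk enforces at every call site, here is a hypothetical caller. It is not part of the patch; the function name publish_trampoline() is invented for the example, while the arch_protect_bpf_trampoline() signature and the im->image/im->size fields are taken from the hunks above:

/* Hypothetical caller, for illustration only.  With the annotated
 * prototype, silently dropping the return value is flagged at build
 * time, so call sites are pushed into the propagate-and-unwind pattern
 * the patch adds.
 */
static int publish_trampoline(struct bpf_tramp_image *im)
{
	int err;

	/* arch_protect_bpf_trampoline(im->image, im->size);
	 *   ^ would now trigger: warning: ignoring return value of
	 *     'arch_protect_bpf_trampoline' declared with attribute
	 *     'warn_unused_result' [-Wunused-result]
	 */
	err = arch_protect_bpf_trampoline(im->image, im->size);
	if (err)
		return err;	/* leave the image unpublished on failure */

	return 0;
}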