Message ID | 20210324174030.2053353-1-jolsa@kernel.org (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Headers | show |
Series | [PATCHv2] bpf: Take module reference for trampoline in module | expand |
Context | Check | Description |
---|---|---|
netdev/tree_selection | success | Not a local patch |
On 3/24/21 6:40 PM, Jiri Olsa wrote: > Currently module can be unloaded even if there's a trampoline > register in it. It's easily reproduced by running in parallel: > > # while :; do ./test_progs -t module_attach; done > # while :; do rmmod bpf_testmod; sleep 0.5; done > > Taking the module reference in case the trampoline's ip is > within the module code. Releasing it when the trampoline's > ip is unregistered. > > Signed-off-by: Jiri Olsa <jolsa@kernel.org> > --- > v2 changes: > - fixed ip_module_put to do preempt_disable/preempt_enable > > kernel/bpf/trampoline.c | 31 +++++++++++++++++++++++++++++++ > 1 file changed, 31 insertions(+) > > diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c > index 1f3a4be4b175..39e4280f94e4 100644 > --- a/kernel/bpf/trampoline.c > +++ b/kernel/bpf/trampoline.c > @@ -87,6 +87,26 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) > return tr; > } > > +static struct module *ip_module_get(unsigned long ip) > +{ > + struct module *mod; > + int err = 0; > + > + preempt_disable(); > + mod = __module_text_address(ip); > + if (mod && !try_module_get(mod)) > + err = -ENOENT; > + preempt_enable(); > + return err ? ERR_PTR(err) : mod; > +} > + > +static void ip_module_put(unsigned long ip) > +{ > + preempt_disable(); > + module_put(__module_text_address(ip)); > + preempt_enable(); Could we cache the mod pointer in tr instead of doing another addr search for dropping the ref? 
> +} > + > static int is_ftrace_location(void *ip) > { > long addr; > @@ -108,6 +128,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr) > ret = unregister_ftrace_direct((long)ip, (long)old_addr); > else > ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL); > + > + if (!ret) > + ip_module_put((unsigned long) ip); > return ret; > } > > @@ -126,6 +149,7 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad > /* first time registering */ > static int register_fentry(struct bpf_trampoline *tr, void *new_addr) > { > + struct module *mod; > void *ip = tr->func.addr; > int ret; > > @@ -134,10 +158,17 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) > return ret; > tr->func.ftrace_managed = ret; > > + mod = ip_module_get((unsigned long) ip); > + if (IS_ERR(mod)) > + return -ENOENT; > + > if (tr->func.ftrace_managed) > ret = register_ftrace_direct((long)ip, (long)new_addr); > else > ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr); > + > + if (ret) > + module_put(mod); > return ret; > } > >
On Thu, Mar 25, 2021 at 10:26:32PM +0100, Daniel Borkmann wrote: > On 3/24/21 6:40 PM, Jiri Olsa wrote: > > Currently module can be unloaded even if there's a trampoline > > register in it. It's easily reproduced by running in parallel: > > > > # while :; do ./test_progs -t module_attach; done > > # while :; do rmmod bpf_testmod; sleep 0.5; done > > > > Taking the module reference in case the trampoline's ip is > > within the module code. Releasing it when the trampoline's > > ip is unregistered. > > > > Signed-off-by: Jiri Olsa <jolsa@kernel.org> > > --- > > v2 changes: > > - fixed ip_module_put to do preempt_disable/preempt_enable > > > > kernel/bpf/trampoline.c | 31 +++++++++++++++++++++++++++++++ > > 1 file changed, 31 insertions(+) > > > > diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c > > index 1f3a4be4b175..39e4280f94e4 100644 > > --- a/kernel/bpf/trampoline.c > > +++ b/kernel/bpf/trampoline.c > > @@ -87,6 +87,26 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) > > return tr; > > } > > +static struct module *ip_module_get(unsigned long ip) > > +{ > > + struct module *mod; > > + int err = 0; > > + > > + preempt_disable(); > > + mod = __module_text_address(ip); > > + if (mod && !try_module_get(mod)) > > + err = -ENOENT; > > + preempt_enable(); > > + return err ? ERR_PTR(err) : mod; > > +} > > + > > +static void ip_module_put(unsigned long ip) > > +{ > > + preempt_disable(); > > + module_put(__module_text_address(ip)); > > + preempt_enable(); > > Could we cache the mod pointer in tr instead of doing another addr search > for dropping the ref? right.. 
I moved it from the ftrace layer where this was not an option, so I did not realize bpf_trampoline could get extended, will send new version thanks, jirka > > > +} > > + > > static int is_ftrace_location(void *ip) > > { > > long addr; > > @@ -108,6 +128,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr) > > ret = unregister_ftrace_direct((long)ip, (long)old_addr); > > else > > ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL); > > + > > + if (!ret) > > + ip_module_put((unsigned long) ip); > > return ret; > > } > > @@ -126,6 +149,7 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad > > /* first time registering */ > > static int register_fentry(struct bpf_trampoline *tr, void *new_addr) > > { > > + struct module *mod; > > void *ip = tr->func.addr; > > int ret; > > @@ -134,10 +158,17 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) > > return ret; > > tr->func.ftrace_managed = ret; > > + mod = ip_module_get((unsigned long) ip); > > + if (IS_ERR(mod)) > > + return -ENOENT; > > + > > if (tr->func.ftrace_managed) > > ret = register_ftrace_direct((long)ip, (long)new_addr); > > else > > ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr); > > + > > + if (ret) > > + module_put(mod); > > return ret; > > } > > >
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 1f3a4be4b175..39e4280f94e4 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -87,6 +87,26 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) return tr; } +static struct module *ip_module_get(unsigned long ip) +{ + struct module *mod; + int err = 0; + + preempt_disable(); + mod = __module_text_address(ip); + if (mod && !try_module_get(mod)) + err = -ENOENT; + preempt_enable(); + return err ? ERR_PTR(err) : mod; +} + +static void ip_module_put(unsigned long ip) +{ + preempt_disable(); + module_put(__module_text_address(ip)); + preempt_enable(); +} + static int is_ftrace_location(void *ip) { long addr; @@ -108,6 +128,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr) ret = unregister_ftrace_direct((long)ip, (long)old_addr); else ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL); + + if (!ret) + ip_module_put((unsigned long) ip); return ret; } @@ -126,6 +149,7 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad /* first time registering */ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) { + struct module *mod; void *ip = tr->func.addr; int ret; @@ -134,10 +158,17 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) return ret; tr->func.ftrace_managed = ret; + mod = ip_module_get((unsigned long) ip); + if (IS_ERR(mod)) + return -ENOENT; + if (tr->func.ftrace_managed) ret = register_ftrace_direct((long)ip, (long)new_addr); else ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr); + + if (ret) + module_put(mod); return ret; }
Currently a module can be unloaded even if there's a trampoline registered in it. It's easily reproduced by running in parallel: # while :; do ./test_progs -t module_attach; done # while :; do rmmod bpf_testmod; sleep 0.5; done Take the module reference in case the trampoline's ip is within the module code, and release it when the trampoline's ip is unregistered. Signed-off-by: Jiri Olsa <jolsa@kernel.org> --- v2 changes: - fixed ip_module_put to do preempt_disable/preempt_enable kernel/bpf/trampoline.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+)