From patchwork Fri Jun 30 08:33:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297766 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id D3853A932 for ; Fri, 30 Jun 2023 08:34:02 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 69C19C433C0; Fri, 30 Jun 2023 08:33:58 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114042; bh=uDN6AaA9M+nq9NqvKkWvbz3C59dBzJ3U6YaBwDwtGaA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Dn6bBhxeS7fk4Xj5242liE+3J23vIzlDr9YHaBy4APsqa56nhPaSDlt2f8X/1KYQB YYbbgQAIZh5WnVyORkE1+2rNsiumm00SvfjFXa83cJPbJ2f+WrmFD3k9Ri7QaECMs0 ANSKSQ2dygHBKJ37uic7LAaUBx2/5P/BaFEKMMbGf1/68TgVRVv64DGo12s0dmPRiW LLwGBlAY4tzzgZlBBlHhhukpcPQmL0dghTPTUJXkjOV0LOYmbhx84h3cA3gnQ1ENm4 BeEwW8MOCKq9wR+eSgHJ+mpMNlbKXihNKCYKnQOwos3dFVyp+EaDzjG45Hk80YqDXY zTsPfUK1V2sgw== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 01/26] bpf: Add attach_type checks under bpf_prog_attach_check_attach_type Date: Fri, 30 Jun 2023 10:33:19 +0200 Message-ID: <20230630083344.984305-2-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Add extra attach_type checks from link_create under bpf_prog_attach_check_attach_type. Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko --- kernel/bpf/syscall.c | 108 +++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 61 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index a2aef900519c..9046ad0f9b4e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3502,34 +3502,6 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) return fd; } -static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, - enum bpf_attach_type attach_type) -{ - switch (prog->type) { - case BPF_PROG_TYPE_CGROUP_SOCK: - case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: - case BPF_PROG_TYPE_CGROUP_SOCKOPT: - case BPF_PROG_TYPE_SK_LOOKUP: - return attach_type == prog->expected_attach_type ? 0 : -EINVAL; - case BPF_PROG_TYPE_CGROUP_SKB: - if (!capable(CAP_NET_ADMIN)) - /* cg-skb progs can be loaded by unpriv user. - * check permissions at attach time. - */ - return -EPERM; - return prog->enforce_expected_attach_type && - prog->expected_attach_type != attach_type ? 
- -EINVAL : 0; - case BPF_PROG_TYPE_KPROBE: - if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && - attach_type != BPF_TRACE_KPROBE_MULTI) - return -EINVAL; - return 0; - default: - return 0; - } -} - static enum bpf_prog_type attach_type_to_prog_type(enum bpf_attach_type attach_type) { @@ -3593,6 +3565,53 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) } } +static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, + enum bpf_attach_type attach_type) +{ + enum bpf_prog_type ptype; + + switch (prog->type) { + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_SK_LOOKUP: + return attach_type == prog->expected_attach_type ? 0 : -EINVAL; + case BPF_PROG_TYPE_CGROUP_SKB: + if (!capable(CAP_NET_ADMIN)) + /* cg-skb progs can be loaded by unpriv user. + * check permissions at attach time. + */ + return -EPERM; + return prog->enforce_expected_attach_type && + prog->expected_attach_type != attach_type ? + -EINVAL : 0; + case BPF_PROG_TYPE_KPROBE: + if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && + attach_type != BPF_TRACE_KPROBE_MULTI) + return -EINVAL; + if (attach_type != BPF_PERF_EVENT && + attach_type != BPF_TRACE_KPROBE_MULTI) + return -EINVAL; + return 0; + case BPF_PROG_TYPE_EXT: + return 0; + case BPF_PROG_TYPE_NETFILTER: + if (attach_type != BPF_NETFILTER) + return -EINVAL; + return 0; + case BPF_PROG_TYPE_PERF_EVENT: + case BPF_PROG_TYPE_TRACEPOINT: + if (attach_type != BPF_PERF_EVENT) + return -EINVAL; + return 0; + default: + ptype = attach_type_to_prog_type(attach_type); + if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) + return -EINVAL; + return 0; + } +} + #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd #define BPF_F_ATTACH_MASK \ @@ -4658,7 +4677,6 @@ static int bpf_map_do_batch(const union bpf_attr *attr, #define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies static int link_create(union bpf_attr *attr, bpfptr_t uattr) { - enum bpf_prog_type ptype; struct bpf_prog *prog; int ret; @@ -4677,38 +4695,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) if (ret) goto out; - switch (prog->type) { - case BPF_PROG_TYPE_EXT: - break; - case BPF_PROG_TYPE_NETFILTER: - if (attr->link_create.attach_type != BPF_NETFILTER) { - ret = -EINVAL; - goto out; - } - break; - case BPF_PROG_TYPE_PERF_EVENT: - case BPF_PROG_TYPE_TRACEPOINT: - if (attr->link_create.attach_type != BPF_PERF_EVENT) { - ret = -EINVAL; - goto out; - } - break; - case BPF_PROG_TYPE_KPROBE: - if (attr->link_create.attach_type != BPF_PERF_EVENT && - attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) { - ret = -EINVAL; - goto out; - } - break; - default: - ptype = attach_type_to_prog_type(attr->link_create.attach_type); - if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) { - ret = -EINVAL; - goto out; - } - break; - } - switch (prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: From patchwork Fri Jun 30 08:33:20 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297767 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1C26D1FB8 for ; Fri, 30 Jun 2023 08:34:14 +0000 (UTC) Received: by 
smtp.kernel.org (Postfix) with ESMTPSA id 6996DC433C8; Fri, 30 Jun 2023 08:34:10 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114053; bh=PtLxYB5fmh1uPRleGoQ2V2j4VL+x1kMGrQEAszyuV1Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=k7oi9f/6qZ81BNp3G5uzPCPysXJSzfUBybC1fIcEneGBKawwSgiA0qp7O9+dwdsP2 /VkRcJq4EssaJ3DMvShIK6IajSsMSOzIct6j4diKYHPOyfk6l/FcuU6f4/JDwC0hft xtOQDl/QZd3bBv5QHpfjZADaPDev535uk7zGZ5W/po/71l5fER+BGIfMlN9NQ1ceF7 Dol9tslyOoTyPi5lQ6CcOGOxDyP3hanNtH+6L5k1mIdBuAjCyP3t5xzf9i/oWhVGxH fZtStFNyywnoC1EL31WgcdmcDoD0yzNzLchWcArX+VHLq+0yHphrG1rDOwv4ypBlKo DleMxlbg443Ww== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 02/26] bpf: Add multi uprobe link Date: Fri, 30 Jun 2023 10:33:20 +0200 Message-ID: <20230630083344.984305-3-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding new multi uprobe link that allows to attach bpf program to multiple uprobes. Uprobes to attach are specified via new link_create uprobe_multi union: struct { __u32 flags; __u32 cnt; __aligned_u64 path; __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; } uprobe_multi; Uprobes are defined for single binary specified in path and multiple calling sites specified in offsets array with optional reference counters specified in ref_ctr_offsets array. All specified arrays have length of 'cnt'. The 'flags' supports single bit for now that marks the uprobe as return probe. 
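For illustration only (not part of this patch), a minimal user-space sketch of the new attach path via the raw bpf(2) syscall could look as follows. It assumes a 64-bit host, a BPF_PROG_TYPE_KPROBE program already loaded with expected_attach_type set to BPF_TRACE_UPROBE_MULTI, and headers that carry the new UAPI values; the binary path and offsets are made up and error handling is omitted:

        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/bpf.h>

        static int uprobe_multi_attach(int prog_fd)
        {
                /* hypothetical binary and probe file offsets */
                unsigned long offsets[2] = { 0x1234, 0x5678 };
                const char *path = "/usr/bin/foo";
                union bpf_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.link_create.prog_fd = prog_fd;
                attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
                attr.link_create.uprobe_multi.path = (__u64)(unsigned long)path;
                attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offsets;
                attr.link_create.uprobe_multi.cnt = 2;
                /* setting attr.link_create.uprobe_multi.flags to
                 * BPF_F_UPROBE_MULTI_RETURN would turn these into return probes
                 */

                /* returns a new link fd on success, -1/errno on failure */
                return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
        }
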
Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko --- include/linux/trace_events.h | 6 + include/uapi/linux/bpf.h | 14 ++ kernel/bpf/syscall.c | 14 +- kernel/trace/bpf_trace.c | 237 +++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 14 ++ 5 files changed, 282 insertions(+), 3 deletions(-) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 7c4a0b72334e..c71845e9d40a 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -749,6 +749,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -795,6 +796,11 @@ bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { return -EOPNOTSUPP; } +static inline int +bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 60a9d59beeab..a236139f08ce 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1036,6 +1036,7 @@ enum bpf_attach_type { BPF_LSM_CGROUP, BPF_STRUCT_OPS, BPF_NETFILTER, + BPF_TRACE_UPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1053,6 +1054,7 @@ enum bpf_link_type { BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_UPROBE_MULTI = 11, MAX_BPF_LINK_TYPE, }; @@ -1170,6 +1172,11 @@ enum bpf_link_type { */ #define BPF_F_KPROBE_MULTI_RETURN (1U << 0) +/* link_create.uprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_UPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_UPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1579,6 +1586,13 @@ union bpf_attr { __s32 priority; __u32 flags; } netfilter; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + } uprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 9046ad0f9b4e..3b0582a64ce4 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2813,10 +2813,12 @@ static void bpf_link_free_id(int id) /* Clean up bpf_link and corresponding anon_inode file and FD. After * anon_inode is created, bpf_link can't be just kfree()'d due to deferred - * anon_inode's release() call. This helper marksbpf_link as + * anon_inode's release() call. This helper marks bpf_link as * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt * is not decremented, it's the responsibility of a calling code that failed * to complete bpf_link initialization. + * This helper eventually calls link's dealloc callback, but does not call + * link's release callback. 
*/ void bpf_link_cleanup(struct bpf_link_primer *primer) { @@ -3589,8 +3591,12 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI && attach_type != BPF_TRACE_KPROBE_MULTI) return -EINVAL; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && + attach_type != BPF_TRACE_UPROBE_MULTI) + return -EINVAL; if (attach_type != BPF_PERF_EVENT && - attach_type != BPF_TRACE_KPROBE_MULTI) + attach_type != BPF_TRACE_KPROBE_MULTI && + attach_type != BPF_TRACE_UPROBE_MULTI) return -EINVAL; return 0; case BPF_PROG_TYPE_EXT: @@ -4748,8 +4754,10 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) case BPF_PROG_TYPE_KPROBE: if (attr->link_create.attach_type == BPF_PERF_EVENT) ret = bpf_perf_link_attach(attr, prog); - else + else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI) ret = bpf_kprobe_multi_link_attach(attr, prog); + else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) + ret = bpf_uprobe_multi_link_attach(attr, prog); break; default: ret = -EINVAL; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 03b7f6b8e4f0..a0b9d034300f 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -2922,3 +2923,239 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) return 0; } #endif + +#ifdef CONFIG_UPROBES +struct bpf_uprobe_multi_link; + +struct bpf_uprobe { + struct bpf_uprobe_multi_link *link; + loff_t offset; + struct uprobe_consumer consumer; +}; + +struct bpf_uprobe_multi_link { + struct path path; + struct bpf_link link; + u32 cnt; + struct bpf_uprobe *uprobes; +}; + +struct bpf_uprobe_multi_run_ctx { + struct bpf_run_ctx run_ctx; + unsigned long entry_ip; +}; + +static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes, + u32 cnt) +{ + u32 i; + + for (i = 0; i < cnt; i++) { + uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset, + &uprobes[i].consumer); + } +} + +static void bpf_uprobe_multi_link_release(struct bpf_link *link) +{ + struct bpf_uprobe_multi_link *umulti_link; + + umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); + bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt); + path_put(&umulti_link->path); +} + +static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) +{ + struct bpf_uprobe_multi_link *umulti_link; + + umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); + kvfree(umulti_link->uprobes); + kfree(umulti_link); +} + +static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { + .release = bpf_uprobe_multi_link_release, + .dealloc = bpf_uprobe_multi_link_dealloc, +}; + +static int uprobe_prog_run(struct bpf_uprobe *uprobe, + unsigned long entry_ip, + struct pt_regs *regs) +{ + struct bpf_uprobe_multi_link *link = uprobe->link; + struct bpf_uprobe_multi_run_ctx run_ctx = { + .entry_ip = entry_ip, + }; + struct bpf_prog *prog = link->link.prog; + bool sleepable = prog->aux->sleepable; + struct bpf_run_ctx *old_run_ctx; + int err = 0; + + might_fault(); + + migrate_disable(); + + if (sleepable) + rcu_read_lock_trace(); + else + rcu_read_lock(); + + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + err = bpf_prog_run(link->link.prog, regs); + bpf_reset_run_ctx(old_run_ctx); + + if (sleepable) + rcu_read_unlock_trace(); + else + rcu_read_unlock(); + + migrate_enable(); + return err; +} + +static int +uprobe_multi_link_handler(struct 
uprobe_consumer *con, struct pt_regs *regs) +{ + struct bpf_uprobe *uprobe; + + uprobe = container_of(con, struct bpf_uprobe, consumer); + return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); +} + +static int +uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs) +{ + struct bpf_uprobe *uprobe; + + uprobe = container_of(con, struct bpf_uprobe, consumer); + return uprobe_prog_run(uprobe, func, regs); +} + +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_uprobe_multi_link *link = NULL; + unsigned long __user *uref_ctr_offsets; + unsigned long *ref_ctr_offsets = NULL; + struct bpf_link_primer link_primer; + struct bpf_uprobe *uprobes = NULL; + unsigned long __user *uoffsets; + void __user *upath; + u32 flags, cnt, i; + struct path path; + char *name; + int err; + + /* no support for 32bit archs yet */ + if (sizeof(u64) != sizeof(void *)) + return -EOPNOTSUPP; + + if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI) + return -EINVAL; + + flags = attr->link_create.uprobe_multi.flags; + if (flags & ~BPF_F_UPROBE_MULTI_RETURN) + return -EINVAL; + + /* + * path, offsets and cnt are mandatory, + * ref_ctr_offsets is optional + */ + upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); + uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); + cnt = attr->link_create.uprobe_multi.cnt; + + if (!upath || !uoffsets || !cnt) + return -EINVAL; + + uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); + + name = strndup_user(upath, PATH_MAX); + if (IS_ERR(name)) { + err = PTR_ERR(name); + return err; + } + + err = kern_path(name, LOOKUP_FOLLOW, &path); + kfree(name); + if (err) + return err; + + if (!d_is_reg(path.dentry)) { + err = -EINVAL; + goto error_path_put; + } + + err = -ENOMEM; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL); + + if (!uprobes || !link) + goto error_free; + + if (uref_ctr_offsets) { + ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL); + if (!ref_ctr_offsets) + goto error_free; + } + + for (i = 0; i < cnt; i++) { + if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) { + err = -EFAULT; + goto error_free; + } + if (__get_user(uprobes[i].offset, uoffsets + i)) { + err = -EFAULT; + goto error_free; + } + + uprobes[i].link = link; + + if (flags & BPF_F_UPROBE_MULTI_RETURN) + uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; + else + uprobes[i].consumer.handler = uprobe_multi_link_handler; + } + + link->cnt = cnt; + link->uprobes = uprobes; + link->path = path; + + bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, + &bpf_uprobe_multi_link_lops, prog); + + err = bpf_link_prime(&link->link, &link_primer); + if (err) + goto error_free; + + for (i = 0; i < cnt; i++) { + err = uprobe_register_refctr(d_real_inode(link->path.dentry), + uprobes[i].offset, + ref_ctr_offsets ? 
ref_ctr_offsets[i] : 0, + &uprobes[i].consumer); + if (err) { + bpf_uprobe_unregister(&path, uprobes, i); + bpf_link_cleanup(&link_primer); + kvfree(ref_ctr_offsets); + return err; + } + } + + kvfree(ref_ctr_offsets); + return bpf_link_settle(&link_primer); + +error_free: + kvfree(ref_ctr_offsets); + kvfree(uprobes); + kfree(link); +error_path_put: + path_put(&path); + return err; +} +#else /* !CONFIG_UPROBES */ +int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_UPROBES */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 60a9d59beeab..a236139f08ce 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1036,6 +1036,7 @@ enum bpf_attach_type { BPF_LSM_CGROUP, BPF_STRUCT_OPS, BPF_NETFILTER, + BPF_TRACE_UPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1053,6 +1054,7 @@ enum bpf_link_type { BPF_LINK_TYPE_KPROBE_MULTI = 8, BPF_LINK_TYPE_STRUCT_OPS = 9, BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_UPROBE_MULTI = 11, MAX_BPF_LINK_TYPE, }; @@ -1170,6 +1172,11 @@ enum bpf_link_type { */ #define BPF_F_KPROBE_MULTI_RETURN (1U << 0) +/* link_create.uprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_UPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_UPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1579,6 +1586,13 @@ union bpf_attr { __s32 priority; __u32 flags; } netfilter; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 path; + __aligned_u64 offsets; + __aligned_u64 ref_ctr_offsets; + } uprobe_multi; }; } link_create; From patchwork Fri Jun 30 08:33:21 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297768 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 72B9A1FB8 for ; Fri, 30 Jun 2023 08:34:25 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 6A326C433C0; Fri, 30 Jun 2023 08:34:21 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114065; bh=IK+MfoK/+sXqjHtk5grgY3lYySrmEECf+JLE2l/bfRg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=UOr5VAA5hsugPhNTiNvglTblu89NkdogW45XR/+XCzdq2ryd/qCAdWs5wWVBSNXaV F9FF64FzMThWhG98PReE1QkV2CEEOEcleYA3n9CH4L/E+cVy3bOzZ6K5WAEmc+s7F4 XL7C5vl6Wm/hzsXn2g7Ia92ilfMmLw2yHMm80DK7boj+1N/LYvYsUHHgRpAn5QYqVF gSvddPj/woTYGc8s++p+dNGdjTtSas+WlXSURTbSe2s8TwQnMjncNw1zD/zXsgAGmP v+vxSQu2Xk9TlaMVnZxWPoUrYqhSJeHWuI+lem5j5SZtHwbopC15Jtfo1UusR6wUvQ T0m/S6N5sv6pg== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 03/26] bpf: Add cookies support for uprobe_multi link Date: Fri, 30 Jun 2023 10:33:21 +0200 Message-ID: <20230630083344.984305-4-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net 
Adding support to specify cookies array for uprobe_multi link. The cookies array share indexes and length with other uprobe_multi arrays (offsets/ref_ctr_offsets). The cookies[i] value defines cookie for i-the uprobe and will be returned by bpf_get_attach_cookie helper when called from ebpf program hooked to that specific uprobe. Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa --- include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 2 +- kernel/trace/bpf_trace.c | 45 +++++++++++++++++++++++++++++++--- tools/include/uapi/linux/bpf.h | 1 + 4 files changed, 44 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a236139f08ce..4103f395593b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1592,6 +1592,7 @@ union bpf_attr { __aligned_u64 path; __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; } uprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 3b0582a64ce4..affad356c603 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4680,7 +4680,7 @@ static int bpf_map_do_batch(const union bpf_attr *attr, return err; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies +#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.cookies static int link_create(union bpf_attr *attr, bpfptr_t uattr) { struct bpf_prog *prog; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a0b9d034300f..4657df8f884e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -87,6 +87,8 @@ static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); +static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx); + /** * trace_call_bpf - invoke BPF program * @call: tracepoint event @@ -1099,6 +1101,18 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs) +{ + return bpf_uprobe_multi_cookie(current->bpf_ctx); +} + +static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = { + .func = bpf_get_attach_cookie_uprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) { struct bpf_trace_run_ctx *run_ctx; @@ -1545,9 +1559,11 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) &bpf_get_func_ip_proto_kprobe_multi : &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: - return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 
- &bpf_get_attach_cookie_proto_kmulti : - &bpf_get_attach_cookie_proto_trace; + if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) + return &bpf_get_attach_cookie_proto_kmulti; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) + return &bpf_get_attach_cookie_proto_umulti; + return &bpf_get_attach_cookie_proto_trace; default: return bpf_tracing_func_proto(func_id, prog); } @@ -2930,6 +2946,7 @@ struct bpf_uprobe_multi_link; struct bpf_uprobe { struct bpf_uprobe_multi_link *link; loff_t offset; + u64 cookie; struct uprobe_consumer consumer; }; @@ -2943,6 +2960,7 @@ struct bpf_uprobe_multi_link { struct bpf_uprobe_multi_run_ctx { struct bpf_run_ctx run_ctx; unsigned long entry_ip; + struct bpf_uprobe *uprobe; }; static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes, @@ -2986,6 +3004,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, struct bpf_uprobe_multi_link *link = uprobe->link; struct bpf_uprobe_multi_run_ctx run_ctx = { .entry_ip = entry_ip, + .uprobe = uprobe, }; struct bpf_prog *prog = link->link.prog; bool sleepable = prog->aux->sleepable; @@ -3032,6 +3051,14 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s return uprobe_prog_run(uprobe, func, regs); } +static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) +{ + struct bpf_uprobe_multi_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); + return run_ctx->uprobe->cookie; +} + int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct bpf_uprobe_multi_link *link = NULL; @@ -3040,6 +3067,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr struct bpf_link_primer link_primer; struct bpf_uprobe *uprobes = NULL; unsigned long __user *uoffsets; + u64 __user *ucookies; void __user *upath; u32 flags, cnt, i; struct path path; @@ -3059,7 +3087,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr /* * path, offsets and cnt are mandatory, - * ref_ctr_offsets is optional + * ref_ctr_offsets and cookies are optional */ upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path); uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets); @@ -3069,6 +3097,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr return -EINVAL; uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets); + ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies); name = strndup_user(upath, PATH_MAX); if (IS_ERR(name)) { @@ -3101,6 +3130,10 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr } for (i = 0; i < cnt; i++) { + if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) { + err = -EFAULT; + goto error_free; + } if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) { err = -EFAULT; goto error_free; @@ -3158,4 +3191,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr { return -EOPNOTSUPP; } +static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) +{ + return 0; +} #endif /* CONFIG_UPROBES */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a236139f08ce..4103f395593b 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1592,6 +1592,7 @@ union bpf_attr { __aligned_u64 path; __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; + __aligned_u64 cookies; } uprobe_multi; }; } link_create; From 
patchwork Fri Jun 30 08:33:22 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297769 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 8E18AA932 for ; Fri, 30 Jun 2023 08:34:37 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 97528C433C8; Fri, 30 Jun 2023 08:34:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114077; bh=lpC0jJgw4lN49OtXHEwUVDXhmCzs7pbcDtpBVmr8lEQ=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=cG4wo9QrEUBUnlJ7tz3S7G4MTtfm1/b1Nm6G2jsh2U0LNi505Z4wjml3dhAEgbvYs d+gxTBAx7B6IWowC+JiHF5wtL7ufQpBmRjOBIOYDa0Drv3Y7vYDoCDTmfBpKCQTfvM ZMbOoRmvcuBZjhQWLFkpFJe2sY05tKyfM/CB3DYOo6WsQJIuUcTzxVYvozkb6mm2KA TMigBTVprF1LuHz37Je9uD0V7QPqeOQJytTxFX4dMmonSCC6JwnnQ5/0sBb9unfaXq SKkuFuf8bIq2Yu+qoWMmm7VAEeQhSvizP4Ym+XWd26YsxW75kRnQTeQKid6+qyy4Cm lsHXgubUloQNA== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: Oleg Nesterov , bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 04/26] bpf: Add pid filter support for uprobe_multi link Date: Fri, 30 Jun 2023 10:33:22 +0200 Message-ID: <20230630083344.984305-5-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding support to specify pid for uprobe_multi link and the uprobes are created only for task with given pid value. Using the consumer.filter filter callback for that, so the task gets filtered during the uprobe installation. We still need to check the task during runtime in the uprobe handler, because the handler could get executed if there's another system wide consumer on the same uprobe (thanks Oleg for the insight). 
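As an illustration of the new field (building on the LINK_CREATE sketch shown with patch 02), the only user-space change is filling in the pid before issuing the syscall; target_pid is a hypothetical pid of the process to trace, and leaving it 0 keeps the existing system-wide behaviour:

        /* only tasks with this pid will have the uprobes installed and
         * the handler executed for them
         */
        attr.link_create.uprobe_multi.pid = target_pid;
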
Cc: Oleg Nesterov Reviewed-by: Oleg Nesterov Signed-off-by: Jiri Olsa --- include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 2 +- kernel/trace/bpf_trace.c | 33 +++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 1 + 4 files changed, 36 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4103f395593b..347c5ed6286b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1593,6 +1593,7 @@ union bpf_attr { __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; __aligned_u64 cookies; + __u32 pid; } uprobe_multi; }; } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index affad356c603..c16b9f80099d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4680,7 +4680,7 @@ static int bpf_map_do_batch(const union bpf_attr *attr, return err; } -#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.cookies +#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid static int link_create(union bpf_attr *attr, bpfptr_t uattr) { struct bpf_prog *prog; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 4657df8f884e..4ef51fd0497f 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2955,6 +2955,7 @@ struct bpf_uprobe_multi_link { struct bpf_link link; u32 cnt; struct bpf_uprobe *uprobes; + struct task_struct *task; }; struct bpf_uprobe_multi_run_ctx { @@ -2981,6 +2982,8 @@ static void bpf_uprobe_multi_link_release(struct bpf_link *link) umulti_link = container_of(link, struct bpf_uprobe_multi_link, link); bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt); path_put(&umulti_link->path); + if (umulti_link->task) + put_task_struct(umulti_link->task); } static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link) @@ -3011,6 +3014,9 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, struct bpf_run_ctx *old_run_ctx; int err = 0; + if (link->task && current != link->task) + return 0; + might_fault(); migrate_disable(); @@ -3033,6 +3039,16 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, return err; } +static bool +uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx, + struct mm_struct *mm) +{ + struct bpf_uprobe *uprobe; + + uprobe = container_of(con, struct bpf_uprobe, consumer); + return uprobe->link->task->mm == mm; +} + static int uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs) { @@ -3066,12 +3082,14 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr unsigned long *ref_ctr_offsets = NULL; struct bpf_link_primer link_primer; struct bpf_uprobe *uprobes = NULL; + struct task_struct *task = NULL; unsigned long __user *uoffsets; u64 __user *ucookies; void __user *upath; u32 flags, cnt, i; struct path path; char *name; + pid_t pid; int err; /* no support for 32bit archs yet */ @@ -3115,6 +3133,15 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr goto error_path_put; } + pid = attr->link_create.uprobe_multi.pid; + if (pid) { + rcu_read_lock(); + task = get_pid_task(find_vpid(pid), PIDTYPE_PID); + rcu_read_unlock(); + if (!task) + goto error_path_put; + } + err = -ENOMEM; link = kzalloc(sizeof(*link), GFP_KERNEL); @@ -3149,11 +3176,15 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; else uprobes[i].consumer.handler = uprobe_multi_link_handler; + + if (pid) + uprobes[i].consumer.filter = 
uprobe_multi_link_filter; } link->cnt = cnt; link->uprobes = uprobes; link->path = path; + link->task = task; bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, &bpf_uprobe_multi_link_lops, prog); @@ -3182,6 +3213,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr kvfree(ref_ctr_offsets); kvfree(uprobes); kfree(link); + if (task) + put_task_struct(task); error_path_put: path_put(&path); return err; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4103f395593b..347c5ed6286b 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1593,6 +1593,7 @@ union bpf_attr { __aligned_u64 offsets; __aligned_u64 ref_ctr_offsets; __aligned_u64 cookies; + __u32 pid; } uprobe_multi; }; } link_create; From patchwork Fri Jun 30 08:33:23 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297770 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B03DFA932 for ; Fri, 30 Jun 2023 08:34:49 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id CEE22C433CC; Fri, 30 Jun 2023 08:34:45 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114089; bh=0qoRw/U65xIcHG3lNFN6XfJEJya6smmXh7LEKBX7N8Y=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=EkFDzDKo+kG2/pxVXpxhIeUuG3p4nDFB3u/E9KDmDVn9CyFpumMg4Kd+XaOlVKrLt quHkFJTkU06fVYXiL224D494eoIQb3R2js43DcPhFsc5Ph/HizJTMqzHKauUhsOz1A TpWWZ53X80iGN8tfrU21PiP3HrQw1VGAufuKboVU7cY6WxcVTudqgJTr5p0vyz9Tx+ yB1NoavLC9V2tLNE2lyC/kNWyp/jGMIBmsJrVJ+T1gvPNQDDQ+6Si7V6RGFbR1Z0kW 4abjfxe7iSPWQ07fL3HveVpFKIeoyY4P1bbGVPPsK8RueP9cNd4BU2YS33yPHv18Gm fgNVIiOBhKO8w== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 05/26] bpf: Add bpf_get_func_ip helper support for uprobe link Date: Fri, 30 Jun 2023 10:33:23 +0200 Message-ID: <20230630083344.984305-6-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding support for bpf_get_func_ip helper being called from ebpf program attached by uprobe_multi link. It returns the ip of the uprobe. 
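For illustration, a BPF-side sketch of a program using the helper could look as below. It assumes a BPF_PROG_TYPE_KPROBE program loaded with expected_attach_type BPF_TRACE_UPROBE_MULTI; the SEC() name relies on the libbpf support added later in this series, and bpf_get_attach_cookie() relies on patch 03:

        #include <linux/bpf.h>
        #include <linux/ptrace.h>
        #include <bpf/bpf_helpers.h>

        char LICENSE[] SEC("license") = "GPL";

        /* illustrative section name, resolved by libbpf once its
         * uprobe.multi support lands later in the series
         */
        SEC("uprobe.multi")
        int handle_uprobe(struct pt_regs *ctx)
        {
                __u64 ip = bpf_get_func_ip(ctx);           /* ip of the hit uprobe */
                __u64 cookie = bpf_get_attach_cookie(ctx); /* per-uprobe cookie, patch 03 */

                bpf_printk("uprobe at 0x%llx cookie %llu", ip, cookie);
                return 0;
        }
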
Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa --- kernel/trace/bpf_trace.c | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 4ef51fd0497f..f5a41c1604b8 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -88,6 +88,7 @@ static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx); +static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx); /** * trace_call_bpf - invoke BPF program @@ -1101,6 +1102,18 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { .arg1_type = ARG_PTR_TO_CTX, }; +BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs) +{ + return bpf_uprobe_multi_entry_ip(current->bpf_ctx); +} + +static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = { + .func = bpf_get_func_ip_uprobe_multi, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs) { return bpf_uprobe_multi_cookie(current->bpf_ctx); @@ -1555,9 +1568,11 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_override_return_proto; #endif case BPF_FUNC_get_func_ip: - return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? - &bpf_get_func_ip_proto_kprobe_multi : - &bpf_get_func_ip_proto_kprobe; + if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) + return &bpf_get_func_ip_proto_kprobe_multi; + if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) + return &bpf_get_func_ip_proto_uprobe_multi; + return &bpf_get_func_ip_proto_kprobe; case BPF_FUNC_get_attach_cookie: if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI) return &bpf_get_attach_cookie_proto_kmulti; @@ -3067,6 +3082,14 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s return uprobe_prog_run(uprobe, func, regs); } +static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) +{ + struct bpf_uprobe_multi_run_ctx *run_ctx; + + run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx); + return run_ctx->entry_ip; +} + static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) { struct bpf_uprobe_multi_run_ctx *run_ctx; @@ -3228,4 +3251,8 @@ static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx) { return 0; } +static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) +{ + return 0; +} #endif /* CONFIG_UPROBES */ From patchwork Fri Jun 30 08:33:24 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297771 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 61BFDA932 for ; Fri, 30 Jun 2023 08:35:01 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A2CB0C433C8; Fri, 30 Jun 2023 08:34:57 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114101; bh=BOzTSYYk9hcQZWt1tInRxMn1KtcTN4kfX+JokwRYB1Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=i1LPi8i78Lz+Zue2ZJbLSeQg3VMn0nPtjYhW3xdA4SleoFaGYg3lA7VfzP70uzxx4 
eYBj3efa+AWVOFO1DccPxx1jJDc62/X9ZguW0wOgmMHR5wF5Ipf4VW8ABt7ZNn69J0 l64AickeWixMjZRQi+ZzAgstVSDX/ztQeW1Tt7gqZVnc/LUJKVA+Hf4fWFHcVhwvl1 hYYfM7elPenCm0cO3QqMieRmrFpunocIxxlqpXUSoEsK+atintKTENbgHSo9VIiX+I M3a6vHxqvd3CsnSDkkgARFJVsx8NEv7LKdf2gvtIC2bE3E0mWrhOwf+b/lMwJSBTQ2 laaqGWrC2GC8Q== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 06/26] libbpf: Add uprobe_multi attach type and link names Date: Fri, 30 Jun 2023 10:33:24 +0200 Message-ID: <20230630083344.984305-7-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding new uprobe_multi attach type and link names, so the functions can resolve the new values. Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa --- tools/lib/bpf/libbpf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 214f828ece6b..53dcf25c4878 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -118,6 +118,7 @@ static const char * const attach_type_name[] = { [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi", [BPF_STRUCT_OPS] = "struct_ops", [BPF_NETFILTER] = "netfilter", + [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", }; static const char * const link_type_name[] = { @@ -132,6 +133,7 @@ static const char * const link_type_name[] = { [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi", [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops", [BPF_LINK_TYPE_NETFILTER] = "netfilter", + [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", }; static const char * const map_type_name[] = { From patchwork Fri Jun 30 08:33:25 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297772 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 6D3B5A932 for ; Fri, 30 Jun 2023 08:35:13 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A3B90C433C0; Fri, 30 Jun 2023 08:35:09 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114113; bh=J8yM+PBhePqqb1qm8PTZ+x79N7m8p5Ir/KVVktpM3RQ=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=eiLNL9gl1mgIXM06FkgcyWvUVyRGCHn0B43S3HI14B00KtTtmbCAzTp8LCGU7kGjX A0TirHAj1OzT9aCFn66V/ZiWADnMEmNjKzJT3PNdc/+P8hA4yLDwCCUrTyDojiJbI3 omEgtMEkhB6ZlGJCFw+IHmRmND5p1B2YSMZE3H9KgTmVQm2OrI9nYF8ctEiugJG9ZI 3ATd95k8SBTyQBWONaEwZ9fP13JI9RkGhzd284af6mySJ8pBxm+bktToPhsZOaTCzx ADyYyj0mWLWzNu/kxGp1/ZsqhqMeDCZEQKJ/iz3rfs96L91gGD7VW2L1HIquBmi6Cm 8UXvu5icXsuTg== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 07/26] libbpf: Move elf_find_func_offset* functions to elf object Date: Fri, 30 Jun 2023 10:33:25 +0200 Message-ID: <20230630083344.984305-8-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: 
<20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding new elf object that will contain elf related functions. There's no functional change. Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa --- tools/lib/bpf/Build | 2 +- tools/lib/bpf/elf.c | 198 +++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.c | 186 +--------------------------------- tools/lib/bpf/libbpf_elf.h | 11 +++ 4 files changed, 211 insertions(+), 186 deletions(-) create mode 100644 tools/lib/bpf/elf.c create mode 100644 tools/lib/bpf/libbpf_elf.h diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index b8b0a6369363..2d0c282c8588 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,4 +1,4 @@ libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \ - usdt.o zip.o + usdt.o zip.o elf.o diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c new file mode 100644 index 000000000000..2b62b4af28ce --- /dev/null +++ b/tools/lib/bpf/elf.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include +#include +#include +#include + +#include "libbpf_elf.h" +#include "libbpf_internal.h" +#include "str_error.h" + +#define STRERR_BUFSIZE 128 + +/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */ +static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) +{ + while ((scn = elf_nextscn(elf, scn)) != NULL) { + GElf_Shdr sh; + + if (!gelf_getshdr(scn, &sh)) + continue; + if (sh.sh_type == sh_type) + return scn; + } + return NULL; +} + +/* Find offset of function name in the provided ELF object. "binary_path" is + * the path to the ELF binary represented by "elf", and only used for error + * reporting matters. "name" matches symbol name or name@@LIB for library + * functions. + */ +long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) +{ + int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; + bool is_shared_lib, is_name_qualified; + long ret = -ENOENT; + size_t name_len; + GElf_Ehdr ehdr; + + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } + /* for shared lib case, we do not need to calculate relative offset */ + is_shared_lib = ehdr.e_type == ET_DYN; + + name_len = strlen(name); + /* Does name specify "@@LIB"? */ + is_name_qualified = strstr(name, "@@") != NULL; + + /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if + * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically + * linked binary may not have SHT_DYMSYM, so absence of a section should not be + * reported as a warning/error. 
+ */ + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + size_t nr_syms, strtabidx, idx; + Elf_Data *symbols = NULL; + Elf_Scn *scn = NULL; + int last_bind = -1; + const char *sname; + GElf_Shdr sh; + + scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL); + if (!scn) { + pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", + binary_path); + continue; + } + if (!gelf_getshdr(scn, &sh)) + continue; + strtabidx = sh.sh_link; + symbols = elf_getdata(scn, 0); + if (!symbols) { + pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", + binary_path, elf_errmsg(-1)); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } + nr_syms = symbols->d_size / sh.sh_entsize; + + for (idx = 0; idx < nr_syms; idx++) { + int curr_bind; + GElf_Sym sym; + Elf_Scn *sym_scn; + GElf_Shdr sym_sh; + + if (!gelf_getsym(symbols, idx, &sym)) + continue; + + if (GELF_ST_TYPE(sym.st_info) != STT_FUNC) + continue; + + sname = elf_strptr(elf, strtabidx, sym.st_name); + if (!sname) + continue; + + curr_bind = GELF_ST_BIND(sym.st_info); + + /* User can specify func, func@@LIB or func@@LIB_VERSION. */ + if (strncmp(sname, name, name_len) != 0) + continue; + /* ...but we don't want a search for "foo" to match 'foo2" also, so any + * additional characters in sname should be of the form "@@LIB". + */ + if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@') + continue; + + if (ret >= 0) { + /* handle multiple matches */ + if (last_bind != STB_WEAK && curr_bind != STB_WEAK) { + /* Only accept one non-weak bind. */ + pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", + sname, name, binary_path); + ret = -LIBBPF_ERRNO__FORMAT; + goto out; + } else if (curr_bind == STB_WEAK) { + /* already have a non-weak bind, and + * this is a weak bind, so ignore. + */ + continue; + } + } + + /* Transform symbol's virtual address (absolute for + * binaries and relative for shared libs) into file + * offset, which is what kernel is expecting for + * uprobe/uretprobe attachment. + * See Documentation/trace/uprobetracer.rst for more + * details. + * This is done by looking up symbol's containing + * section's header and using it's virtual address + * (sh_addr) and corresponding file offset (sh_offset) + * to transform sym.st_value (virtual address) into + * desired final file offset. + */ + sym_scn = elf_getscn(elf, sym.st_shndx); + if (!sym_scn) + continue; + if (!gelf_getshdr(sym_scn, &sym_sh)) + continue; + + ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset; + last_bind = curr_bind; + } + if (ret > 0) + break; + } + + if (ret > 0) { + pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path, + ret); + } else { + if (ret == 0) { + pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path, + is_shared_lib ? "should not be 0 in a shared library" : + "try using shared library path instead"); + ret = -ENOENT; + } else { + pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path); + } + } +out: + return ret; +} + +/* Find offset of function name in ELF object specified by path. "name" matches + * symbol name or name@@LIB for library functions. 
+ */ +long elf_find_func_offset_from_file(const char *binary_path, const char *name) +{ + char errmsg[STRERR_BUFSIZE]; + long ret = -ENOENT; + Elf *elf; + int fd; + + fd = open(binary_path, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = -errno; + pr_warn("failed to open %s: %s\n", binary_path, + libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); + return ret; + } + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (!elf) { + pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); + close(fd); + return -LIBBPF_ERRNO__FORMAT; + } + + ret = elf_find_func_offset(elf, binary_path, name); + elf_end(elf); + close(fd); + return ret; +} + diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 53dcf25c4878..093add8124d8 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -54,6 +54,7 @@ #include "hashmap.h" #include "bpf_gen_internal.h" #include "zip.h" +#include "libbpf_elf.h" #ifndef BPF_FS_MAGIC #define BPF_FS_MAGIC 0xcafe4a11 @@ -10811,191 +10812,6 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, return err; } -/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */ -static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) -{ - while ((scn = elf_nextscn(elf, scn)) != NULL) { - GElf_Shdr sh; - - if (!gelf_getshdr(scn, &sh)) - continue; - if (sh.sh_type == sh_type) - return scn; - } - return NULL; -} - -/* Find offset of function name in the provided ELF object. "binary_path" is - * the path to the ELF binary represented by "elf", and only used for error - * reporting matters. "name" matches symbol name or name@@LIB for library - * functions. - */ -static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) -{ - int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; - bool is_shared_lib, is_name_qualified; - long ret = -ENOENT; - size_t name_len; - GElf_Ehdr ehdr; - - if (!gelf_getehdr(elf, &ehdr)) { - pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); - ret = -LIBBPF_ERRNO__FORMAT; - goto out; - } - /* for shared lib case, we do not need to calculate relative offset */ - is_shared_lib = ehdr.e_type == ET_DYN; - - name_len = strlen(name); - /* Does name specify "@@LIB"? */ - is_name_qualified = strstr(name, "@@") != NULL; - - /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if - * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically - * linked binary may not have SHT_DYMSYM, so absence of a section should not be - * reported as a warning/error. 
- */ - for (i = 0; i < ARRAY_SIZE(sh_types); i++) { - size_t nr_syms, strtabidx, idx; - Elf_Data *symbols = NULL; - Elf_Scn *scn = NULL; - int last_bind = -1; - const char *sname; - GElf_Shdr sh; - - scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL); - if (!scn) { - pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", - binary_path); - continue; - } - if (!gelf_getshdr(scn, &sh)) - continue; - strtabidx = sh.sh_link; - symbols = elf_getdata(scn, 0); - if (!symbols) { - pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", - binary_path, elf_errmsg(-1)); - ret = -LIBBPF_ERRNO__FORMAT; - goto out; - } - nr_syms = symbols->d_size / sh.sh_entsize; - - for (idx = 0; idx < nr_syms; idx++) { - int curr_bind; - GElf_Sym sym; - Elf_Scn *sym_scn; - GElf_Shdr sym_sh; - - if (!gelf_getsym(symbols, idx, &sym)) - continue; - - if (GELF_ST_TYPE(sym.st_info) != STT_FUNC) - continue; - - sname = elf_strptr(elf, strtabidx, sym.st_name); - if (!sname) - continue; - - curr_bind = GELF_ST_BIND(sym.st_info); - - /* User can specify func, func@@LIB or func@@LIB_VERSION. */ - if (strncmp(sname, name, name_len) != 0) - continue; - /* ...but we don't want a search for "foo" to match 'foo2" also, so any - * additional characters in sname should be of the form "@@LIB". - */ - if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@') - continue; - - if (ret >= 0) { - /* handle multiple matches */ - if (last_bind != STB_WEAK && curr_bind != STB_WEAK) { - /* Only accept one non-weak bind. */ - pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", - sname, name, binary_path); - ret = -LIBBPF_ERRNO__FORMAT; - goto out; - } else if (curr_bind == STB_WEAK) { - /* already have a non-weak bind, and - * this is a weak bind, so ignore. - */ - continue; - } - } - - /* Transform symbol's virtual address (absolute for - * binaries and relative for shared libs) into file - * offset, which is what kernel is expecting for - * uprobe/uretprobe attachment. - * See Documentation/trace/uprobetracer.rst for more - * details. - * This is done by looking up symbol's containing - * section's header and using it's virtual address - * (sh_addr) and corresponding file offset (sh_offset) - * to transform sym.st_value (virtual address) into - * desired final file offset. - */ - sym_scn = elf_getscn(elf, sym.st_shndx); - if (!sym_scn) - continue; - if (!gelf_getshdr(sym_scn, &sym_sh)) - continue; - - ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset; - last_bind = curr_bind; - } - if (ret > 0) - break; - } - - if (ret > 0) { - pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path, - ret); - } else { - if (ret == 0) { - pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path, - is_shared_lib ? "should not be 0 in a shared library" : - "try using shared library path instead"); - ret = -ENOENT; - } else { - pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path); - } - } -out: - return ret; -} - -/* Find offset of function name in ELF object specified by path. "name" matches - * symbol name or name@@LIB for library functions. 
- */ -static long elf_find_func_offset_from_file(const char *binary_path, const char *name) -{ - char errmsg[STRERR_BUFSIZE]; - long ret = -ENOENT; - Elf *elf; - int fd; - - fd = open(binary_path, O_RDONLY | O_CLOEXEC); - if (fd < 0) { - ret = -errno; - pr_warn("failed to open %s: %s\n", binary_path, - libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); - return ret; - } - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (!elf) { - pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); - close(fd); - return -LIBBPF_ERRNO__FORMAT; - } - - ret = elf_find_func_offset(elf, binary_path, name); - elf_end(elf); - close(fd); - return ret; -} - /* Find offset of function name in archive specified by path. Currently * supported are .zip files that do not compress their contents, as used on * Android in the form of APKs, for example. "file_name" is the name of the ELF diff --git a/tools/lib/bpf/libbpf_elf.h b/tools/lib/bpf/libbpf_elf.h new file mode 100644 index 000000000000..1b652220fabf --- /dev/null +++ b/tools/lib/bpf/libbpf_elf.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ + +#ifndef __LIBBPF_LIBBPF_ELF_H +#define __LIBBPF_LIBBPF_ELF_H + +#include + +long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name); +long elf_find_func_offset_from_file(const char *binary_path, const char *name); + +#endif /* *__LIBBPF_LIBBPF_ELF_H */ From patchwork Fri Jun 30 08:33:26 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297773 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 15830A932 for ; Fri, 30 Jun 2023 08:35:25 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A7AF6C433C8; Fri, 30 Jun 2023 08:35:21 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114125; bh=Lo2VfRcx8TDju3ULQzfkd+mHlmoXoIiWwskwmwWB7rY=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=uIpfkMmC86NglFlSxwjuWjG8ufAV0amw3i3iZlACmi2lmFiyWMozdCp/gvd6v7i0P Ijcq3ALLPQ8cqYUtrIGQIYxSxneU4HCrA2U1XfjqpYxmDaPC8FQuPed6afp5BXl5Yp PlKn/eIxT5KVlsAgntFWxzrvYycgGBI7eI1Led+78BKPl77lH0Gz3e2l3l68PL19U2 ACW1i69tiCWByIIL38bCtr2a/Ls/wsgv9J/KUA8C8QBzZpZuUo7wpYNy65dXaTbB52 Wj03PYFIS+BQ2c9TSvJ34h8CxWkGiId0hLh/7BaUTWWmwhP/Zy52vrY75W5WUVeugb zdZCPvR2WWQ0A== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 08/26] libbpf: Add elf_open/elf_close functions Date: Fri, 30 Jun 2023 10:33:26 +0200 Message-ID: <20230630083344.984305-9-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding elf_open/elf_close functions and using it in elf_find_func_offset_from_file function. It will be used in following changes to save some common code. 
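For illustration, the calling convention the new helpers establish (mirroring the reworked elf_find_func_offset_from_file() in the diff below) is roughly this; process_elf() is a hypothetical stand-in for whatever work a caller does with the opened Elf handle:

        struct elf_fd elf_fd;
        long ret;

        ret = elf_open(binary_path, &elf_fd);
        if (ret)
                return ret;
        ret = process_elf(elf_fd.elf, binary_path);  /* caller-specific work */
        elf_close(&elf_fd);
        return ret;
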
Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko --- tools/lib/bpf/elf.c | 59 +++++++++++++++++++++++++------------- tools/lib/bpf/libbpf_elf.h | 8 ++++++ tools/lib/bpf/usdt.c | 31 ++++++-------------- 3 files changed, 56 insertions(+), 42 deletions(-) diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index 2b62b4af28ce..74e35071d22e 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -11,6 +11,40 @@ #define STRERR_BUFSIZE 128 +int elf_open(const char *binary_path, struct elf_fd *elf_fd) +{ + char errmsg[STRERR_BUFSIZE]; + int fd, ret; + Elf *elf; + + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn("elf: failed to init libelf for %s\n", binary_path); + return -LIBBPF_ERRNO__LIBELF; + } + fd = open(binary_path, O_RDONLY | O_CLOEXEC); + if (fd < 0) { + ret = -errno; + pr_warn("elf: failed to open %s: %s\n", binary_path, + libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); + return ret; + } + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (!elf) { + pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); + close(fd); + return -LIBBPF_ERRNO__FORMAT; + } + elf_fd->fd = fd; + elf_fd->elf = elf; + return 0; +} + +void elf_close(struct elf_fd *elf_fd) +{ + elf_end(elf_fd->elf); + close(elf_fd->fd); +} + /* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */ static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) { @@ -171,28 +205,13 @@ long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) */ long elf_find_func_offset_from_file(const char *binary_path, const char *name) { - char errmsg[STRERR_BUFSIZE]; + struct elf_fd elf_fd; long ret = -ENOENT; - Elf *elf; - int fd; - fd = open(binary_path, O_RDONLY | O_CLOEXEC); - if (fd < 0) { - ret = -errno; - pr_warn("failed to open %s: %s\n", binary_path, - libbpf_strerror_r(ret, errmsg, sizeof(errmsg))); + ret = elf_open(binary_path, &elf_fd); + if (ret) return ret; - } - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (!elf) { - pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1)); - close(fd); - return -LIBBPF_ERRNO__FORMAT; - } - - ret = elf_find_func_offset(elf, binary_path, name); - elf_end(elf); - close(fd); + ret = elf_find_func_offset(elf_fd.elf, binary_path, name); + elf_close(&elf_fd); return ret; } - diff --git a/tools/lib/bpf/libbpf_elf.h b/tools/lib/bpf/libbpf_elf.h index 1b652220fabf..c763ac35a85e 100644 --- a/tools/lib/bpf/libbpf_elf.h +++ b/tools/lib/bpf/libbpf_elf.h @@ -5,6 +5,14 @@ #include +struct elf_fd { + Elf *elf; + int fd; +}; + +int elf_open(const char *binary_path, struct elf_fd *elf_fd); +void elf_close(struct elf_fd *elf_fd); + long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name); long elf_find_func_offset_from_file(const char *binary_path, const char *name); diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index f1a141555f08..9fa883ebc0bd 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -19,6 +19,7 @@ #include "libbpf.h" #include "libbpf_common.h" #include "libbpf_internal.h" +#include "libbpf_elf.h" #include "hashmap.h" /* libbpf's USDT support consists of BPF-side state/code and user-space @@ -943,32 +944,22 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie) { - int i, fd, err, spec_map_fd, ip_map_fd; + int i, err, spec_map_fd, ip_map_fd; LIBBPF_OPTS(bpf_uprobe_opts, opts); struct hashmap *specs_hash = NULL; struct bpf_link_usdt 
*link = NULL; struct usdt_target *targets = NULL; + struct elf_fd elf_fd; size_t target_cnt; - Elf *elf; spec_map_fd = bpf_map__fd(man->specs_map); ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map); - fd = open(path, O_RDONLY | O_CLOEXEC); - if (fd < 0) { - err = -errno; - pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err); + err = elf_open(path, &elf_fd); + if (err) return libbpf_err_ptr(err); - } - elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); - if (!elf) { - err = -EBADF; - pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1)); - goto err_out; - } - - err = sanity_check_usdt_elf(elf, path); + err = sanity_check_usdt_elf(elf_fd.elf, path); if (err) goto err_out; @@ -981,7 +972,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct /* discover USDT in given binary, optionally limiting * activations to a given PID, if pid > 0 */ - err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name, + err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name, usdt_cookie, &targets, &target_cnt); if (err <= 0) { err = (err == 0) ? -ENOENT : err; @@ -1066,9 +1057,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct free(targets); hashmap__free(specs_hash); - elf_end(elf); - close(fd); - + elf_close(&elf_fd); return &link->link; err_out: @@ -1076,9 +1065,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_link__destroy(&link->link); free(targets); hashmap__free(specs_hash); - if (elf) - elf_end(elf); - close(fd); + elf_close(&elf_fd); return libbpf_err_ptr(err); } From patchwork Fri Jun 30 08:33:27 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297788 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 25327A932 for ; Fri, 30 Jun 2023 08:35:38 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id D7EC1C433C8; Fri, 30 Jun 2023 08:35:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114138; bh=zrm949cB1mOryVNymXblH8xLJVu+X/RGQp9RYXMNCDE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=B14BSl5joaH2aXwwo6Bmggwga/QOq9WnTakCnZ+EL7XJyvlU/jlTsOWwzvgYu254+ q7ivHLAlfbadVilh6QXBNBNm9QLy42saJY7fcS8TM3sx8OwJHixtpQcptrzagMOuDS SIXpyKVCFoNOuwxTY32E5LyBWHzu0YdK7Kd2/mXruSqFR1ql6tOc0kjvoBDnMCGl3y nXMORCa2d8EUrapyj/eJWPPJG7PhROc3oU+N7jSDU+LMuxG6emOEzNEiP+bJlGxW80 VNGzl1Va7QV0FQQ3ddu/Pr6t6Y+jAk6+X0+RL96T7sFU6c7AdSmuhMEhFXOVLimSRA NB1LfILWglDFA== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 09/26] libbpf: Add elf symbol iterator Date: Fri, 30 Jun 2023 10:33:27 +0200 Message-ID: <20230630083344.984305-10-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding elf symbol iterator object (and some functions) 
that follow open-coded iterator pattern and some functions to ease up iterating elf object symbols. The idea is to iterate single symbol section with: struct elf_sym_iter iter; struct elf_sym *sym; if (elf_sym_iter_new(&iter, elf, binary_path, SHT_DYNSYM)) goto error; while ((sym = elf_sym_iter_next(&iter))) { ... } I considered opening the elf inside the iterator and iterate all symbol sections, but then it gets more complicated wrt user checks for when the next section is processed. Plus side is the we don't need 'exit' function, because caller/user is in charge of that. The returned iterated symbol object from elf_sym_iter_next function is placed inside the struct elf_sym_iter, so no extra allocation or argument is needed. Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko --- tools/lib/bpf/elf.c | 178 +++++++++++++++++++++++++++++--------------- 1 file changed, 117 insertions(+), 61 deletions(-) diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index 74e35071d22e..fcce4bd2478f 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -59,6 +59,108 @@ static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn) return NULL; } +struct elf_sym { + const char *name; + GElf_Sym sym; + GElf_Shdr sh; +}; + +struct elf_sym_iter { + Elf *elf; + Elf_Data *syms; + size_t nr_syms; + size_t strtabidx; + size_t next_sym_idx; + struct elf_sym sym; + int st_type; +}; + +static int elf_sym_iter_new(struct elf_sym_iter *iter, + Elf *elf, const char *binary_path, + int sh_type, int st_type) +{ + Elf_Scn *scn = NULL; + GElf_Ehdr ehdr; + GElf_Shdr sh; + + memset(iter, 0, sizeof(*iter)); + + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1)); + return -EINVAL; + } + + scn = elf_find_next_scn_by_type(elf, sh_type, NULL); + if (!scn) { + pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", + binary_path); + return -ENOENT; + } + + if (!gelf_getshdr(scn, &sh)) + return -EINVAL; + + iter->strtabidx = sh.sh_link; + iter->syms = elf_getdata(scn, 0); + if (!iter->syms) { + pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", + binary_path, elf_errmsg(-1)); + return -EINVAL; + } + iter->nr_syms = iter->syms->d_size / sh.sh_entsize; + iter->elf = elf; + iter->st_type = st_type; + return 0; +} + +static struct elf_sym *elf_sym_iter_next(struct elf_sym_iter *iter) +{ + struct elf_sym *ret = &iter->sym; + GElf_Sym *sym = &ret->sym; + const char *name = NULL; + Elf_Scn *sym_scn; + size_t idx; + + for (idx = iter->next_sym_idx; idx < iter->nr_syms; idx++) { + if (!gelf_getsym(iter->syms, idx, sym)) + continue; + if (GELF_ST_TYPE(sym->st_info) != iter->st_type) + continue; + name = elf_strptr(iter->elf, iter->strtabidx, sym->st_name); + if (!name) + continue; + + /* Transform symbol's virtual address (absolute for + * binaries and relative for shared libs) into file + * offset, which is what kernel is expecting for + * uprobe/uretprobe attachment. + * See Documentation/trace/uprobetracer.rst for more + * details. + * This is done by looking up symbol's containing + * section's header and using iter's virtual address + * (sh_addr) and corresponding file offset (sh_offset) + * to transform sym.st_value (virtual address) into + * desired final file offset. 
+ */ + sym_scn = elf_getscn(iter->elf, sym->st_shndx); + if (!sym_scn) + continue; + if (!gelf_getshdr(sym_scn, &ret->sh)) + continue; + + iter->next_sym_idx = idx + 1; + ret->name = name; + return ret; + } + + return NULL; +} + +static unsigned long elf_sym_offset(struct elf_sym *sym) +{ + return sym->sym.st_value - sym->sh.sh_addr + sym->sh.sh_offset; +} + /* Find offset of function name in the provided ELF object. "binary_path" is * the path to the ELF binary represented by "elf", and only used for error * reporting matters. "name" matches symbol name or name@@LIB for library @@ -90,64 +192,36 @@ long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) * reported as a warning/error. */ for (i = 0; i < ARRAY_SIZE(sh_types); i++) { - size_t nr_syms, strtabidx, idx; - Elf_Data *symbols = NULL; - Elf_Scn *scn = NULL; + struct elf_sym_iter iter; + struct elf_sym *sym; int last_bind = -1; - const char *sname; - GElf_Shdr sh; + int curr_bind; - scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL); - if (!scn) { - pr_debug("elf: failed to find symbol table ELF sections in '%s'\n", - binary_path); - continue; - } - if (!gelf_getshdr(scn, &sh)) - continue; - strtabidx = sh.sh_link; - symbols = elf_getdata(scn, 0); - if (!symbols) { - pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n", - binary_path, elf_errmsg(-1)); - ret = -LIBBPF_ERRNO__FORMAT; + ret = elf_sym_iter_new(&iter, elf, binary_path, sh_types[i], STT_FUNC); + if (ret) { + if (ret == -ENOENT) + continue; goto out; } - nr_syms = symbols->d_size / sh.sh_entsize; - - for (idx = 0; idx < nr_syms; idx++) { - int curr_bind; - GElf_Sym sym; - Elf_Scn *sym_scn; - GElf_Shdr sym_sh; - - if (!gelf_getsym(symbols, idx, &sym)) - continue; - - if (GELF_ST_TYPE(sym.st_info) != STT_FUNC) - continue; - - sname = elf_strptr(elf, strtabidx, sym.st_name); - if (!sname) - continue; - - curr_bind = GELF_ST_BIND(sym.st_info); + while ((sym = elf_sym_iter_next(&iter))) { /* User can specify func, func@@LIB or func@@LIB_VERSION. */ - if (strncmp(sname, name, name_len) != 0) + if (strncmp(sym->name, name, name_len) != 0) continue; /* ...but we don't want a search for "foo" to match 'foo2" also, so any * additional characters in sname should be of the form "@@LIB". */ - if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@') + if (!is_name_qualified && sym->name[name_len] != '\0' && sym->name[name_len] != '@') continue; - if (ret >= 0) { + curr_bind = GELF_ST_BIND(sym->sym.st_info); + + if (ret > 0) { /* handle multiple matches */ if (last_bind != STB_WEAK && curr_bind != STB_WEAK) { /* Only accept one non-weak bind. */ pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n", - sname, name, binary_path); + sym->name, name, binary_path); ret = -LIBBPF_ERRNO__FORMAT; goto out; } else if (curr_bind == STB_WEAK) { @@ -158,25 +232,7 @@ long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name) } } - /* Transform symbol's virtual address (absolute for - * binaries and relative for shared libs) into file - * offset, which is what kernel is expecting for - * uprobe/uretprobe attachment. - * See Documentation/trace/uprobetracer.rst for more - * details. - * This is done by looking up symbol's containing - * section's header and using it's virtual address - * (sh_addr) and corresponding file offset (sh_offset) - * to transform sym.st_value (virtual address) into - * desired final file offset. 
- */ - sym_scn = elf_getscn(elf, sym.st_shndx); - if (!sym_scn) - continue; - if (!gelf_getshdr(sym_scn, &sym_sh)) - continue; - - ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset; + ret = elf_sym_offset(sym); last_bind = curr_bind; } if (ret > 0) From patchwork Fri Jun 30 08:33:28 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297789 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 931521FB8 for ; Fri, 30 Jun 2023 08:35:50 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1FC94C433C0; Fri, 30 Jun 2023 08:35:45 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114150; bh=VUxbHMnLpXo/Xi6CJQ/zWxHn+cqlEAHQGuUroIq5QOE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=G2ZwRjZsPlW9//WxiiJSF94B5ZUH8WD+7c+0W+tSHUsu9ismGdgdQrcES7WfVJGeA R6W1BZ8U2Q3Obh9+GWPzuftfb2YpIprKLe4PEiQlJxIEMM7NMCDoFAb0KVEcfNy8tE LP+XeLp+PGki0Kwfa23dsmGTzlzSGhBuo5AIIv9HwrRQ7i2OC28Av0cLx5k8Yl3ozM 077ttwjGNH5PUo5yDMTujDO4pWnjwvqBr748vnY1ecPnKMDusVC7YJ5jD77JVflqlg 1ylwL38SigiQEywFhJNZloNEthaVlQRV17O1xNAnRnRLW7r5vKZ+ZjL2M8+wr+qm9W wICuZEX3D1QTQ== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 10/26] libbpf: Add elf_resolve_syms_offsets function Date: Fri, 30 Jun 2023 10:33:28 +0200 Message-ID: <20230630083344.984305-11-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding elf_resolve_syms_offsets function that looks up offsets for symbols specified in syms array argument. Offsets are returned in allocated array with the 'cnt' size, that needs to be released by the caller. 
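A minimal caller sketch for the new helper (illustrative only, not part of the patch; the binary path and symbol names are made up). The offsets array is allocated by the helper, keeps the same order as the input syms array, and must be freed by the caller:

	const char *syms[] = { "func_a", "func_b" };    /* hypothetical symbols */
	unsigned long *offsets = NULL;
	int err;

	err = elf_resolve_syms_offsets("/tmp/test_bin", 2, syms, &offsets);
	if (err)                /* e.g. -ENOENT if any requested symbol is missing */
		return err;
	/* offsets[0]/offsets[1] now hold the file offsets of func_a/func_b */
	free(offsets);
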
Signed-off-by: Jiri Olsa --- tools/lib/bpf/elf.c | 105 +++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf_elf.h | 2 + 2 files changed, 107 insertions(+) diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index fcce4bd2478f..7e2f3b2e1fb6 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -271,3 +271,108 @@ long elf_find_func_offset_from_file(const char *binary_path, const char *name) elf_close(&elf_fd); return ret; } + +struct symbol { + const char *name; + int bind; + int idx; +}; + +static int symbol_cmp(const void *_a, const void *_b) +{ + const struct symbol *a = _a; + const struct symbol *b = _b; + + return strcmp(a->name, b->name); +} + +int elf_resolve_syms_offsets(const char *binary_path, int cnt, + const char **syms, unsigned long **poffsets) +{ + int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; + int err = 0, i, cnt_done = 0; + unsigned long *offsets; + struct symbol *symbols; + struct elf_fd elf_fd; + + err = elf_open(binary_path, &elf_fd); + if (err) + return err; + + offsets = calloc(cnt, sizeof(*offsets)); + symbols = calloc(cnt, sizeof(*symbols)); + + if (!offsets || !symbols) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < cnt; i++) { + symbols[i].name = syms[i]; + symbols[i].idx = i; + } + + qsort(symbols, cnt, sizeof(*symbols), symbol_cmp); + + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + struct elf_sym_iter iter; + struct elf_sym *sym; + + err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC); + if (err) { + if (err == -ENOENT) + continue; + goto out; + } + + while ((sym = elf_sym_iter_next(&iter))) { + int bind = GELF_ST_BIND(sym->sym.st_info); + struct symbol *found, tmp = { + .name = sym->name, + }; + unsigned long *offset; + + found = bsearch(&tmp, symbols, cnt, sizeof(*symbols), symbol_cmp); + if (!found) + continue; + + offset = &offsets[found->idx]; + if (*offset > 0) { + /* same offset, no problem */ + if (*offset == elf_sym_offset(sym)) + continue; + /* handle multiple matches */ + if (found->bind != STB_WEAK && bind != STB_WEAK) { + /* Only accept one non-weak bind. */ + pr_warn("elf: ambiguous match foundr '%s', '%s' in '%s'\n", + sym->name, found->name, binary_path); + err = -LIBBPF_ERRNO__FORMAT; + goto out; + } else if (bind == STB_WEAK) { + /* already have a non-weak bind, and + * this is a weak bind, so ignore. 
+ */ + continue; + } + } else { + cnt_done++; + } + *offset = elf_sym_offset(sym); + found->bind = bind; + } + } + + if (cnt != cnt_done) { + err = -ENOENT; + goto out; + } + + *poffsets = offsets; + +out: + free(symbols); + if (err) + free(offsets); + elf_close(&elf_fd); + return err; +} diff --git a/tools/lib/bpf/libbpf_elf.h b/tools/lib/bpf/libbpf_elf.h index c763ac35a85e..026c7b378727 100644 --- a/tools/lib/bpf/libbpf_elf.h +++ b/tools/lib/bpf/libbpf_elf.h @@ -16,4 +16,6 @@ void elf_close(struct elf_fd *elf_fd); long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name); long elf_find_func_offset_from_file(const char *binary_path, const char *name); +int elf_resolve_syms_offsets(const char *binary_path, int cnt, + const char **syms, unsigned long **poffsets); #endif /* *__LIBBPF_LIBBPF_ELF_H */ From patchwork Fri Jun 30 08:33:29 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297790 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B0619A932 for ; Fri, 30 Jun 2023 08:36:02 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1AB78C433C0; Fri, 30 Jun 2023 08:35:57 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114162; bh=ksBU+QIJshjNzOO4TrnPBH/WUhA50RSmIomZyzng9Go=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=CQcYeEnvGDWlZ98Os16GIRfN9dnV96QKfgK68dA1159170TPKxNWDYN9wuAl/Kl7E gkYBQ01vEE4btR5NttHT7f/l8WnuRdCyuHvUAdM3oyOLnW1FwyUTzKHheJx9j+3mc+ 2LTKeEmdJXwBl89tK5Q92pRVcK9UGy7Y2lzxpdg2/vOYZ6guolgCsf5Hx2YPDcUnLM euwQuTwjgjKF5E5oo6Y8vRsWXme/rWrV6s+WNB+dymyuixsSBz/cG/GZxCEHoe8jwA kTLkEV5SFLVQzQQBs8D/MdLTZu6/LiYmxS4lona5rfhogA6a1p+at9L7xATYD0lGG1 HrU3J3FqGwkCQ== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 11/26] libbpf: Add elf_resolve_pattern_offsets function Date: Fri, 30 Jun 2023 10:33:29 +0200 Message-ID: <20230630083344.984305-12-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding elf_resolve_pattern_offsets function that looks up offsets for symbols specified by pattern argument. The 'pattern' argument allows wildcards (*?' supported). Offsets are returned in allocated array together with its size and needs to be released by the caller. 
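A matching caller sketch (illustrative only, not part of the patch; the binary path and pattern are made up). The helper allocates the offsets array and reports its size via *pcnt; the caller frees it:

	unsigned long *offsets = NULL;
	size_t cnt = 0;
	int err;

	err = elf_resolve_pattern_offsets("/tmp/test_bin", "func_*", &offsets, &cnt);
	if (err)                /* -ENOENT when nothing matches the pattern */
		return err;
	/* cnt file offsets of all STT_FUNC symbols matching "func_*" */
	free(offsets);
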
Signed-off-by: Jiri Olsa --- tools/lib/bpf/elf.c | 57 +++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.c | 2 +- tools/lib/bpf/libbpf_elf.h | 3 ++ tools/lib/bpf/libbpf_internal.h | 1 + 4 files changed, 62 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index 7e2f3b2e1fb6..f2d1a8cc2f9d 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -376,3 +376,60 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt, elf_close(&elf_fd); return err; } + +int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern, + unsigned long **poffsets, size_t *pcnt) +{ + int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB }; + unsigned long *offsets = NULL; + size_t cap = 0, cnt = 0; + struct elf_fd elf_fd; + int err = 0, i; + + err = elf_open(binary_path, &elf_fd); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(sh_types); i++) { + struct elf_sym_iter iter; + struct elf_sym *sym; + + err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC); + if (err) { + if (err == -ENOENT) + continue; + goto out; + } + + while ((sym = elf_sym_iter_next(&iter))) { + if (!glob_match(sym->name, pattern)) + continue; + + err = libbpf_ensure_mem((void **) &offsets, &cap, sizeof(*offsets), + cnt + 1); + if (err) + goto out; + + offsets[cnt++] = elf_sym_offset(sym); + } + + /* If we found anything in the first symbol section, + * do not search others to avoid duplicates. + */ + if (cnt) + break; + } + + if (cnt) { + *poffsets = offsets; + *pcnt = cnt; + } else { + err = -ENOENT; + } + +out: + if (err) + free(offsets); + elf_close(&elf_fd); + return err; +} diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 093add8124d8..f33ef7cb1adc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10509,7 +10509,7 @@ struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog, } /* Adapted from perf/util/string.c */ -static bool glob_match(const char *str, const char *pat) +bool glob_match(const char *str, const char *pat) { while (*str && *pat && *pat != '*') { if (*pat == '?') { /* Matches any single character */ diff --git a/tools/lib/bpf/libbpf_elf.h b/tools/lib/bpf/libbpf_elf.h index 026c7b378727..0c75d3b2398b 100644 --- a/tools/lib/bpf/libbpf_elf.h +++ b/tools/lib/bpf/libbpf_elf.h @@ -18,4 +18,7 @@ long elf_find_func_offset_from_file(const char *binary_path, const char *name); int elf_resolve_syms_offsets(const char *binary_path, int cnt, const char **syms, unsigned long **poffsets); + +int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern, + unsigned long **poffsets, size_t *pcnt); #endif /* *__LIBBPF_LIBBPF_ELF_H */ diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index e4d05662a96c..7d75b92e531a 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -577,4 +577,5 @@ static inline bool is_pow_of_2(size_t x) #define PROG_LOAD_ATTEMPTS 5 int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts); +bool glob_match(const char *str, const char *pat); #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ From patchwork Fri Jun 30 08:33:30 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297791 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate 
requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 05C05A932 for ; Fri, 30 Jun 2023 08:36:14 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 4B8BEC433C0; Fri, 30 Jun 2023 08:36:10 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114173; bh=Xppr0OLoc0Y5WpjZBukLc1k/JAPv214hJBEM50YhaxU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=TPtVABjwB7oneVabYgqIdn+AupbZlIkcOSKEoip+tFP2xGTQWgsuZhy2DOZ69YFcf YTLkKsUiqoes3Gbxsy59YMOdNFmGlXJDEgp/8pDQNd9y+DjkmJPBAjRyNLYsrIlsYN 4HgrMZhK+rOCiqFt0IpKo3VY71+9yRvKp0Z81SPSJRW7opNJVMegM95VojDyfzuXi+ Eo2asnyqyrmP1S2aIJbL6OGUjK129Bh2epD8oIpeGtFEGnDPK93rAKDrIW1mtPJ2cX Y92VMmhYTr7cNzaR7JFBrzsWwvdY1IG3fNqITwULBqXkUTvYDB+Y+jCib5F/+xQJ7f JGsEAhcLufCYg== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 12/26] libbpf: Add bpf_link_create support for multi uprobes Date: Fri, 30 Jun 2023 10:33:30 +0200 Message-ID: <20230630083344.984305-13-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding new uprobe_multi struct to bpf_link_create_opts object to pass multiple uprobe data to link_create attr uapi. Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa --- tools/lib/bpf/bpf.c | 11 +++++++++++ tools/lib/bpf/bpf.h | 11 ++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index ed86b37d8024..0fd35c91f50c 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -733,6 +733,17 @@ int bpf_link_create(int prog_fd, int target_fd, if (!OPTS_ZEROED(opts, kprobe_multi)) return libbpf_err(-EINVAL); break; + case BPF_TRACE_UPROBE_MULTI: + attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0); + attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0); + attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0)); + attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0)); + attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0)); + attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0)); + attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0); + if (!OPTS_ZEROED(opts, uprobe_multi)) + return libbpf_err(-EINVAL); + break; case BPF_TRACE_FENTRY: case BPF_TRACE_FEXIT: case BPF_MODIFY_RETURN: diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 9aa0ee473754..82979b4f2769 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -346,13 +346,22 @@ struct bpf_link_create_opts { const unsigned long *addrs; const __u64 *cookies; } kprobe_multi; + struct { + __u32 flags; + __u32 cnt; + const char *path; + const unsigned long *offsets; + const unsigned long *ref_ctr_offsets; + const __u64 *cookies; + __u32 pid; + } uprobe_multi; struct { __u64 cookie; } tracing; }; size_t :0; }; -#define bpf_link_create_opts__last_field kprobe_multi.cookies +#define bpf_link_create_opts__last_field uprobe_multi.pid LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, 
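As a rough sketch of driving the new opts directly (illustrative only, not part of the patch; prog_fd, offsets and cnt are assumed to be prepared by the caller, e.g. via the elf_resolve_*_offsets helpers from the previous patches):

	LIBBPF_OPTS(bpf_link_create_opts, opts);
	int link_fd;

	opts.uprobe_multi.path = "/tmp/test_bin";   /* hypothetical target binary */
	opts.uprobe_multi.offsets = offsets;        /* resolved file offsets */
	opts.uprobe_multi.cnt = cnt;
	/* optional: ref_ctr_offsets, cookies, pid, flags (BPF_F_UPROBE_MULTI_RETURN) */

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
	if (link_fd < 0)
		return -errno;
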
From patchwork Fri Jun 30 08:33:31 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297792 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 9352DA932 for ; Fri, 30 Jun 2023 08:36:25 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A3E57C433C0; Fri, 30 Jun 2023 08:36:21 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114185; bh=2Yq5mpC+pEG+MrqXH74X96nZwjknPf5/vBk2V8r242g=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=kRg49m1Xwl+F7/6R3U0uXelkVD6v14VPyCNZjSyKGHg/iwR+vasBBn+zdHjfn+bV0 0mUcnCnjCF4LnO2jrYnd53yIMjnHVrwXSpvxgrMpYjKYra73/5zonjaCzCAnFnaX7B QTAHhTqGHxXjb06NL/uUoOLrt14TNZt0smX7KwOUeVKUk5yzNA/CGnrtbNQBiqgyVI 7Qt7xOqK0haB9PTqNHZBr4peEgPzf7wZbrSq7nkh5RyfqlNu9UPz9JhbS2PAV/uhjI 2qKjFr8F9/K53yjDEYezxqQVd7C3b0q5mCk8U+F6FtPAu678JSAAQtjF217ZHypmlX nAp00L6PpexJA== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 13/26] libbpf: Add bpf_program__attach_uprobe_multi function Date: Fri, 30 Jun 2023 10:33:31 +0200 Message-ID: <20230630083344.984305-14-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding bpf_program__attach_uprobe_multi function that allows to attach multiple uprobes with uprobe_multi link. The user can specify uprobes with direct arguments: binary_path/func_pattern/pid or with struct bpf_uprobe_multi_opts opts argument fields: const char **syms; const unsigned long *offsets; const unsigned long *ref_ctr_offsets; const __u64 *cookies; User can specify 2 mutually exclusive set of inputs: 1) use only path/func_pattern/pid arguments 2) use path/pid with allowed combinations of: syms/offsets/ref_ctr_offsets/cookies/cnt - syms and offsets are mutually exclusive - ref_ctr_offsets and cookies are optional Any other usage results in error. 
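Two minimal call sketches matching the two input modes above (illustrative only, not part of the patch; the binary path and symbol names are made up, and pid -1 means no pid filter, as used by the selftests later in the series):

	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
	struct bpf_link *link;

	/* 1) path/func_pattern/pid only */
	link = bpf_program__attach_uprobe_multi(prog, -1, "/tmp/test_bin",
						"func_*", &opts);

	/* 2) path/pid plus syms/cnt (offsets would be the mutually exclusive alternative) */
	const char *syms[] = { "func_a", "func_b" };

	opts.syms = syms;
	opts.cnt = 2;
	link = bpf_program__attach_uprobe_multi(prog, -1, "/tmp/test_bin", NULL, &opts);
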
Signed-off-by: Jiri Olsa --- tools/lib/bpf/libbpf.c | 122 +++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.h | 27 +++++++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 150 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f33ef7cb1adc..b942f248038e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10954,6 +10954,128 @@ static int resolve_full_path(const char *file, char *result, size_t result_sz) return -ENOENT; } +struct bpf_link * +bpf_program__attach_uprobe_multi(const struct bpf_program *prog, + pid_t pid, + const char *path, + const char *func_pattern, + const struct bpf_uprobe_multi_opts *opts) +{ + const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL; + LIBBPF_OPTS(bpf_link_create_opts, lopts); + unsigned long *resolved_offsets = NULL; + int err = 0, link_fd, prog_fd; + struct bpf_link *link = NULL; + char errmsg[STRERR_BUFSIZE]; + char full_path[PATH_MAX]; + const __u64 *cookies; + const char **syms; + bool has_pattern; + bool retprobe; + size_t cnt; + + if (!OPTS_VALID(opts, bpf_uprobe_multi_opts)) + return libbpf_err_ptr(-EINVAL); + + syms = OPTS_GET(opts, syms, NULL); + offsets = OPTS_GET(opts, offsets, NULL); + ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL); + cookies = OPTS_GET(opts, cookies, NULL); + cnt = OPTS_GET(opts, cnt, 0); + + /* + * User can specify 2 mutually exclusive set of inputs: + * + * 1) use only path/func_pattern/pid arguments + * + * 2) use path/pid with allowed combinations of: + * syms/offsets/ref_ctr_offsets/cookies/cnt + * + * - syms and offsets are mutually exclusive + * - ref_ctr_offsets and cookies are optional + * + * Any other usage results in error. + */ + + if (!path && !func_pattern && !cnt) + return libbpf_err_ptr(-EINVAL); + if (func_pattern && !path) + return libbpf_err_ptr(-EINVAL); + + has_pattern = path && func_pattern; + + if (has_pattern) { + if (syms || offsets || ref_ctr_offsets || cookies || cnt) + return libbpf_err_ptr(-EINVAL); + } else { + if (!cnt) + return libbpf_err_ptr(-EINVAL); + if (!!syms == !!offsets) + return libbpf_err_ptr(-EINVAL); + } + + if (has_pattern) { + if (!strchr(path, '/')) { + err = resolve_full_path(path, full_path, sizeof(full_path)); + if (err) { + pr_warn("prog '%s': failed to resolve full path for '%s': %d\n", + prog->name, path, err); + return libbpf_err_ptr(err); + } + path = full_path; + } + + err = elf_resolve_pattern_offsets(path, func_pattern, + &resolved_offsets, &cnt); + if (err < 0) + return libbpf_err_ptr(err); + offsets = resolved_offsets; + } else if (syms) { + err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets); + if (err < 0) + return libbpf_err_ptr(err); + offsets = resolved_offsets; + } + + retprobe = OPTS_GET(opts, retprobe, false); + + lopts.uprobe_multi.path = path; + lopts.uprobe_multi.offsets = offsets; + lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets; + lopts.uprobe_multi.cookies = cookies; + lopts.uprobe_multi.cnt = cnt; + lopts.uprobe_multi.flags = retprobe ? 
BPF_F_UPROBE_MULTI_RETURN : 0; + + if (pid == 0) + pid = getpid(); + if (pid > 0) + lopts.uprobe_multi.pid = pid; + + link = calloc(1, sizeof(*link)); + if (!link) { + err = -ENOMEM; + goto error; + } + link->detach = &bpf_link__detach_fd; + + prog_fd = bpf_program__fd(prog); + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts); + if (link_fd < 0) { + err = -errno; + pr_warn("prog '%s': failed to attach: %s\n", + prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto error; + } + link->fd = link_fd; + free(resolved_offsets); + return link; + +error: + free(resolved_offsets); + free(link); + return libbpf_err_ptr(err); +} + LIBBPF_API struct bpf_link * bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, const char *binary_path, size_t func_offset, diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 754da73c643b..7c218f610210 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -529,6 +529,33 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, const char *pattern, const struct bpf_kprobe_multi_opts *opts); +struct bpf_uprobe_multi_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* array of function symbols to attach */ + const char **syms; + /* array of function addresses to attach */ + const unsigned long *offsets; + /* array of refctr offsets to attach */ + const unsigned long *ref_ctr_offsets; + /* array of user-provided values fetchable through bpf_get_attach_cookie */ + const __u64 *cookies; + /* number of elements in syms/addrs/cookies arrays */ + size_t cnt; + /* create return uprobes */ + bool retprobe; + size_t :0; +}; + +#define bpf_uprobe_multi_opts__last_field retprobe + +LIBBPF_API struct bpf_link * +bpf_program__attach_uprobe_multi(const struct bpf_program *prog, + pid_t pid, + const char *binary_path, + const char *func_pattern, + const struct bpf_uprobe_multi_opts *opts); + struct bpf_ksyscall_opts { /* size of this struct, for forward/backward compatibility */ size_t sz; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 7521a2fb7626..d8d11ea6c35e 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -395,4 +395,5 @@ LIBBPF_1.2.0 { LIBBPF_1.3.0 { global: bpf_obj_pin_opts; + bpf_program__attach_uprobe_multi; } LIBBPF_1.2.0; From patchwork Fri Jun 30 08:33:32 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297793 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BBC8BA932 for ; Fri, 30 Jun 2023 08:36:37 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 6ACCBC433C0; Fri, 30 Jun 2023 08:36:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114197; bh=wmfLgJRPS2h6ShxK5nR4rkG2qWNqblTG4Z1ZPFoze1Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=HpZQinMKqWdj034T6bxRyq9F8OTo1f0WZCdmL7OJf3I/pQFW0Y9IkbfL6VcniJOav 516yZV+v3hAM3fRdNW2EAFVyiWqHOzWlmMJhYMP4aX6wdqzmBUNclYgOOEgVsVJxpI 8ZsPgQ/0AWGlGLOhH7eS+FgkARzfzq6XekPGfMAWpMU0r8T+0pUd/Z3zKZgJuGOgSB Ml1yEP83w7m2vAfqyuS1EKf59EK0C/erpjbAuA2Fb0awEwC/ZEXGX9y6CQpMCuY/29 tJx3/OaYovbQp8opCgU3VS5/VqmTbFNMffvj6wOSVR9jOeHjOGhBuIN32i3TKlpIf0 5PfhQUxq+ynFw== 
From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 14/26] libbpf: Add support for u[ret]probe.multi[.s] program sections Date: Fri, 30 Jun 2023 10:33:32 +0200 Message-ID: <20230630083344.984305-15-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding support for several uprobe_multi program sections to allow auto attach of multi_uprobe programs. Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko --- tools/lib/bpf/libbpf.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b942f248038e..06092b9752f1 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8654,6 +8654,7 @@ static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_lin static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); @@ -8669,6 +8670,10 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe), SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), + SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), + SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi), + SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), + SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt), @@ -10730,6 +10735,37 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru return libbpf_get_error(*link); } +static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + char *probe_type = NULL, *binary_path = NULL, *func_name = NULL; + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + int n, ret = -EINVAL; + + *link = NULL; + + n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%ms", + &probe_type, &binary_path, &func_name); + switch (n) { + case 1: + /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. 
*/ + ret = 0; + break; + case 3: + opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0; + *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts); + ret = libbpf_get_error(*link); + break; + default: + pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name, + prog->sec_name); + break; + } + free(probe_type); + free(binary_path); + free(func_name); + return ret; +} + static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, const char *binary_path, uint64_t offset) { From patchwork Fri Jun 30 08:33:33 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297794 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CD1651FB8 for ; Fri, 30 Jun 2023 08:36:48 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 3D859C433C0; Fri, 30 Jun 2023 08:36:45 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114208; bh=nSnOgIP1ZTUTvNpA5DrwAPnY2lyaqv1+WHhbzVyevIo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=KEi8SBmC/Y1AEbjsdx7tnFpyElwGHJeicLIbUB5iVizlcyxWL/NzR4YmCOmtdnZMe lN9u1zXedATAecQl8ptOkWYIwu8upzhJk3prYC56WFku85qWDa/KL4PEfFQaCP4DFs ZJO642LCJ6qhGoED4VlyI3BmmV+PApGPQioGdSlgxrel3HaF6dNw/xI3fzU1EDwOQm uWf6FUalt/5wtYNn4okLrcJpBueCl3k6kmJ0rRBs5ANz9oPymTH/oB0YaND4W33Z6N JttQvO96rfT8/5aaWOQoTSJ+s0Jj2Sx7+EBMUWlII2OjUeVSgpb8OOqz1BS+taLZIp LAsgvHhDM3v1w== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 15/26] libbpf: Add uprobe multi link detection Date: Fri, 30 Jun 2023 10:33:33 +0200 Message-ID: <20230630083344.984305-16-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding uprobe-multi link detection. It will be used later in bpf_program__attach_usdt function to check and use uprobe_multi link over standard uprobe links. 
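The consumer side then becomes a simple capability switch (illustrative sketch only; the real change lands in the usdt patch later in this series):

	if (kernel_supports(NULL, FEAT_UPROBE_MULTI_LINK)) {
		/* gather offsets/cookies and create one uprobe_multi link */
	} else {
		/* fall back to one legacy uprobe link per target */
	}
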
Signed-off-by: Jiri Olsa --- tools/lib/bpf/libbpf.c | 35 +++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf_internal.h | 2 ++ 2 files changed, 37 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 06092b9752f1..4f61f9dc1748 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4817,6 +4817,38 @@ static int probe_perf_link(void) return link_fd < 0 && err == -EBADF; } +static int probe_uprobe_multi_link(void) +{ + LIBBPF_OPTS(bpf_prog_load_opts, load_opts, + .expected_attach_type = BPF_TRACE_UPROBE_MULTI, + ); + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + unsigned long offset = 0; + int prog_fd, link_fd; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", + insns, ARRAY_SIZE(insns), &load_opts); + if (prog_fd < 0) + return -errno; + + /* create single uprobe on offset 0 in current process */ + link_opts.uprobe_multi.path = "/proc/self/exe"; + link_opts.uprobe_multi.offsets = &offset; + link_opts.uprobe_multi.cnt = 1; + + link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts); + + if (link_fd >= 0) + close(link_fd); + close(prog_fd); + + return link_fd >= 0; +} + static int probe_kern_bpf_cookie(void) { struct bpf_insn insns[] = { @@ -4913,6 +4945,9 @@ static struct kern_feature_desc { [FEAT_SYSCALL_WRAPPER] = { "Kernel using syscall wrapper", probe_kern_syscall_wrapper, }, + [FEAT_UPROBE_MULTI_LINK] = { + "BPF uprobe multi link support", probe_uprobe_multi_link, + }, }; bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id) diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 7d75b92e531a..9c04b3fe1207 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -354,6 +354,8 @@ enum kern_feature_id { FEAT_BTF_ENUM64, /* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */ FEAT_SYSCALL_WRAPPER, + /* BPF uprobe_multi link support */ + FEAT_UPROBE_MULTI_LINK, __FEAT_CNT, }; From patchwork Fri Jun 30 08:33:34 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297795 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 3B2E61FB8 for ; Fri, 30 Jun 2023 08:37:00 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id BE18CC433C0; Fri, 30 Jun 2023 08:36:56 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114220; bh=JRI1/WGQ0U4eaP2uH9LXZN6HtzAj/Pod2zVxg7IQ3C4=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=WNL3TSYP2pB2gVkHsuzg2XEmRDsV6kvRUOuWHWWXUJgE0ZnB5h2wB/ZWSB/MNrzwQ AyB1rlYmpx4O59zMxaVXSwNSrYwZDoI0RbeUSa+neI7+qQDOJaoISlF2+RIxiT7ifW jVP670n7kLymsHFuwdBDr8BHmoi4IfJEfww6Wqs+Ij+VdnxxA8Kd2gH8xN80t17zuR uPA5p2gb6LnZjoAoekv77I0CRr3xOpDuwj0rl77E+0BVzQFBNdpxfHu65Qj/LSus+Z w+6iB2LjEKBorS0H6mg3Ve5obMylZlaZ6HNhGA0chifcAMN3eeO2V1Sg3erCPbppD3 GdhF8g9YZXIjQ== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 16/26] libbpf: Add uprobe multi link support to 
bpf_program__attach_usdt Date: Fri, 30 Jun 2023 10:33:34 +0200 Message-ID: <20230630083344.984305-17-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding support for usdt_manager_attach_usdt to use uprobe_multi link to attach to usdt probes. The uprobe_multi support is detected before the usdt program is loaded and its expected_attach_type is set accordingly. If uprobe_multi support is detected the usdt_manager_attach_usdt gathers uprobes info and calls bpf_program__attach_uprobe to create all needed uprobes. If uprobe_multi support is not detected the old behaviour stays. Also adding usdt.s program section for sleepable usdt probes. Signed-off-by: Jiri Olsa --- tools/lib/bpf/libbpf.c | 13 +++++-- tools/lib/bpf/usdt.c | 78 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 75 insertions(+), 16 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4f61f9dc1748..e234c2e860f9 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -365,6 +365,8 @@ enum sec_def_flags { SEC_SLEEPABLE = 8, /* BPF program support non-linear XDP buffer */ SEC_XDP_FRAGS = 16, + /* Setup proper attach type for usdt probes. */ + SEC_USDT = 32, }; struct bpf_sec_def { @@ -6807,6 +6809,10 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog, if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; + /* special check for usdt to use uprobe_multi link */ + if ((def & SEC_USDT) && kernel_supports(NULL, FEAT_UPROBE_MULTI_LINK)) + prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI; + if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) { int btf_obj_fd = 0, btf_type_id = 0, err; const char *attach_name; @@ -6875,7 +6881,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog if (!insns || !insns_cnt) return -EINVAL; - load_attr.expected_attach_type = prog->expected_attach_type; if (kernel_supports(obj, FEAT_PROG_NAME)) prog_name = prog->name; load_attr.attach_prog_fd = prog->attach_prog_fd; @@ -6911,6 +6916,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog insns_cnt = prog->insns_cnt; } + /* allow prog_prepare_load_fn to change expected_attach_type */ + load_attr.expected_attach_type = prog->expected_attach_type; + if (obj->gen_loader) { bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name, license, insns, insns_cnt, &load_attr, @@ -8711,7 +8719,8 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi), SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall), - SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt), + SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt), + SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT|SEC_SLEEPABLE, attach_usdt), SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index 9fa883ebc0bd..6ff66a8eaf85 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -809,6 +809,8 @@ struct bpf_link_usdt { long abs_ip; struct bpf_link *link; } *uprobes; + + struct bpf_link 
*multi_link; }; static int bpf_link_usdt_detach(struct bpf_link *link) @@ -817,6 +819,9 @@ static int bpf_link_usdt_detach(struct bpf_link *link) struct usdt_manager *man = usdt_link->usdt_man; int i; + /* When having multi_link, uprobe_cnt is 0 */ + bpf_link__destroy(usdt_link->multi_link); + for (i = 0; i < usdt_link->uprobe_cnt; i++) { /* detach underlying uprobe link */ bpf_link__destroy(usdt_link->uprobes[i].link); @@ -944,11 +949,13 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie) { + unsigned long *offsets = NULL, *ref_ctr_offsets = NULL; int i, err, spec_map_fd, ip_map_fd; LIBBPF_OPTS(bpf_uprobe_opts, opts); struct hashmap *specs_hash = NULL; struct bpf_link_usdt *link = NULL; struct usdt_target *targets = NULL; + __u64 *cookies = NULL; struct elf_fd elf_fd; size_t target_cnt; @@ -995,10 +1002,21 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct link->link.detach = &bpf_link_usdt_detach; link->link.dealloc = &bpf_link_usdt_dealloc; - link->uprobes = calloc(target_cnt, sizeof(*link->uprobes)); - if (!link->uprobes) { - err = -ENOMEM; - goto err_out; + if (kernel_supports(NULL, FEAT_UPROBE_MULTI_LINK)) { + offsets = calloc(target_cnt, sizeof(*offsets)); + cookies = calloc(target_cnt, sizeof(*cookies)); + ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets)); + + if (!offsets || !ref_ctr_offsets || !cookies) { + err = -ENOMEM; + goto err_out; + } + } else { + link->uprobes = calloc(target_cnt, sizeof(*link->uprobes)); + if (!link->uprobes) { + err = -ENOMEM; + goto err_out; + } } for (i = 0; i < target_cnt; i++) { @@ -1039,20 +1057,48 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct goto err_out; } - opts.ref_ctr_offset = target->sema_off; - opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0; - uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path, - target->rel_ip, &opts); - err = libbpf_get_error(uprobe_link); + if (kernel_supports(NULL, FEAT_UPROBE_MULTI_LINK)) { + offsets[i] = target->rel_ip; + ref_ctr_offsets[i] = target->sema_off; + cookies[i] = spec_id; + } else { + opts.ref_ctr_offset = target->sema_off; + opts.bpf_cookie = man->has_bpf_cookie ? 
spec_id : 0; + uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path, + target->rel_ip, &opts); + err = libbpf_get_error(uprobe_link); + if (err) { + pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", + i, usdt_provider, usdt_name, path, err); + goto err_out; + } + + link->uprobes[i].link = uprobe_link; + link->uprobes[i].abs_ip = target->abs_ip; + link->uprobe_cnt++; + } + } + + if (kernel_supports(NULL, FEAT_UPROBE_MULTI_LINK)) { + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi, + .cnt = target_cnt, + .offsets = offsets, + .ref_ctr_offsets = ref_ctr_offsets, + .cookies = cookies, + ); + + link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path, + NULL, &opts_multi); + err = libbpf_get_error(link->multi_link); if (err) { - pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n", - i, usdt_provider, usdt_name, path, err); + pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %d\n", + usdt_provider, usdt_name, path, err); goto err_out; } - link->uprobes[i].link = uprobe_link; - link->uprobes[i].abs_ip = target->abs_ip; - link->uprobe_cnt++; + free(offsets); + free(ref_ctr_offsets); + free(cookies); } free(targets); @@ -1061,6 +1107,10 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct return &link->link; err_out: + free(offsets); + free(ref_ctr_offsets); + free(cookies); + if (link) bpf_link__destroy(&link->link); free(targets); From patchwork Fri Jun 30 08:33:35 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297796 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4C9C11FB8 for ; Fri, 30 Jun 2023 08:37:12 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id D33FEC433C9; Fri, 30 Jun 2023 08:37:08 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114232; bh=r/qCypdLDUAiuSd7OKqIYDN+uJTDFtEUejCrOrITWug=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=rsRhmguu3XwyL1KCTbnpOC5saJIisBe2HULUlfq8+g5Bea4uBDKUYerZdTnE6nzfi yA+APuecI4bL2MShTdUXSVg7M+YWRQbJn1lm71id68uEDwN5dVWJ9gJsNI7s8Wq0sC QxHQ6OoFO67QKog9kyvMhKZxfVPmTR8YfAXNVKX235fyZUFnO0Xj0+iiG5jIrwq7Zq nPQEcXBhsJHAFkCAXOTiEmDUOwnQAFDYyDKujisYWe+4ubZptT1mq2dJacnivMNc2E 0k8Hcd1VggQDSAwo6hNeUy0YhQ0Q602ZNfIueDt/etchpFHiO7Jf9h8ssxDQhVY8hh SjzYgoUgVwHlQ== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 17/26] selftests/bpf: Add uprobe_multi skel test Date: Fri, 30 Jun 2023 10:33:35 +0200 Message-ID: <20230630083344.984305-18-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding uprobe_multi test for skeleton load/attach functions, to test skeleton auto attach for uprobe_multi link. Test that bpf_get_func_ip works properly for uprobe_multi attachment. 
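The auto-attach exercised by this test relies on the section name carrying both the binary path and the glob pattern; for reference, an illustrative breakdown (not part of the patch), matching the "%m[^/]/%m[^:]:%ms" parsing added by the attach_uprobe_multi patch earlier in the series:

	SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
		probe_type  = "uprobe.multi"
		binary_path = "/proc/self/exe"
		func_name   = "uprobe_multi_func_*"   (glob, resolved via elf_resolve_pattern_offsets)
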
Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 76 ++++++++++++++++ .../selftests/bpf/progs/uprobe_multi.c | 91 +++++++++++++++++++ 2 files changed, 167 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi.c diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c new file mode 100644 index 000000000000..5cd1116bbb62 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "uprobe_multi.skel.h" + +static char test_data[] = "test_data"; + +noinline void uprobe_multi_func_1(void) +{ + asm volatile (""); +} + +noinline void uprobe_multi_func_2(void) +{ + asm volatile (""); +} + +noinline void uprobe_multi_func_3(void) +{ + asm volatile (""); +} + +static void uprobe_multi_test_run(struct uprobe_multi *skel) +{ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + skel->bss->user_ptr = test_data; + skel->bss->pid = getpid(); + + /* trigger all probes */ + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + /* + * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123] + * function and each slepable probe (6) increments uprobe_multi_sleep_result. + */ + ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result"); +} + +static void test_skel_api(void) +{ + struct uprobe_multi *skel = NULL; + int err; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + err = uprobe_multi__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi__attach")) + goto cleanup; + + uprobe_multi_test_run(skel); + +cleanup: + uprobe_multi__destroy(skel); +} + +void test_uprobe_multi_test(void) +{ + if (test__start_subtest("skel_api")) + test_skel_api(); +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c new file mode 100644 index 000000000000..1eeb9b7b9cad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +__u64 uprobe_multi_func_1_addr = 0; +__u64 uprobe_multi_func_2_addr = 0; +__u64 uprobe_multi_func_3_addr = 0; + +__u64 uprobe_multi_func_1_result = 0; +__u64 uprobe_multi_func_2_result = 0; +__u64 uprobe_multi_func_3_result = 0; + +__u64 uretprobe_multi_func_1_result = 0; +__u64 uretprobe_multi_func_2_result = 0; +__u64 uretprobe_multi_func_3_result = 0; + +__u64 uprobe_multi_sleep_result = 0; + +int pid = 0; +bool test_cookie = false; +void *user_ptr = 0; + +static __always_inline bool 
verify_sleepable_user_copy(void) +{ + char data[9]; + + bpf_copy_from_user(data, sizeof(data), user_ptr); + return bpf_strncmp(data, sizeof(data), "test_data") == 0; +} + +static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep) +{ + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0; + __u64 addr = bpf_get_func_ip(ctx); + +#define SET(__var, __addr, __cookie) ({ \ + if (addr == __addr && \ + (!test_cookie || (cookie == __cookie))) \ + __var += 1; \ +}) + + if (is_return) { + SET(uretprobe_multi_func_1_result, uprobe_multi_func_1_addr, 2); + SET(uretprobe_multi_func_2_result, uprobe_multi_func_2_addr, 3); + SET(uretprobe_multi_func_3_result, uprobe_multi_func_3_addr, 1); + } else { + SET(uprobe_multi_func_1_result, uprobe_multi_func_1_addr, 3); + SET(uprobe_multi_func_2_result, uprobe_multi_func_2_addr, 1); + SET(uprobe_multi_func_3_result, uprobe_multi_func_3_addr, 2); + } + +#undef SET + + if (is_sleep && verify_sleepable_user_copy()) + uprobe_multi_sleep_result += 1; +} + +SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*") +int test_uprobe(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, false, false); + return 0; +} + +SEC("uretprobe.multi//proc/self/exe:uprobe_multi_func_*") +int test_uretprobe(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, true, false); + return 0; +} + +SEC("uprobe.multi.s//proc/self/exe:uprobe_multi_func_*") +int test_uprobe_sleep(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, false, true); + return 0; +} + +SEC("uretprobe.multi.s//proc/self/exe:uprobe_multi_func_*") +int test_uretprobe_sleep(struct pt_regs *ctx) +{ + uprobe_multi_check(ctx, true, true); + return 0; +} From patchwork Fri Jun 30 08:33:36 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297797 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 29F89A932 for ; Fri, 30 Jun 2023 08:37:24 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 9C1FAC433C8; Fri, 30 Jun 2023 08:37:20 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114244; bh=E92IeJoLSBFkGbrsbYzpFje+U2zekAnDaxcskWWFil0=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=kTdmJ/a8VNxJNWMkhaBEnwDqFjxZLU5MkjStmQeOqTmtnpzW/Ez8IwOoI+cZj2RO/ r/M+xvjbp6NYi9iO6YiMzKZY6TlaL5vRn72CFcsHHWTh42L2P0Z72NPIWdOI0pbGkM tz7MdNA73vA3vVkxwCBv7Q7xPjLbG8ELgHGX9kXsZmj+ChK31BRAwffGMH6a0YNvhZ MRVs+0Y++wLXn9vweDG3U2M+lUsE4j7e2M/ip2xQmnsvHYjNWcOSMotyOGsmFvyVP4 cjjuYANZCQ1ioWIwnZ+SpHT1N+hml20fMqp2oJAVpXh+0pswYeYvmC4/PgfUj3lrXC /CGQH1LVUNOCw== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 18/26] selftests/bpf: Add uprobe_multi api test Date: Fri, 30 Jun 2023 10:33:36 +0200 Message-ID: <20230630083344.984305-19-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 
X-Patchwork-Delegate: bpf@iogearbox.net Adding uprobe_multi test for bpf_program__attach_uprobe_multi attach function. Testing attachment using glob patterns and via bpf_uprobe_multi_opts paths/syms fields. Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index 5cd1116bbb62..f97a68871e73 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -69,8 +69,79 @@ static void test_skel_api(void) uprobe_multi__destroy(skel); } +static void +test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct bpf_link *link3 = NULL, *link4 = NULL; + struct uprobe_multi *skel = NULL; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + opts->retprobe = false; + link1 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = true; + link2 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) + goto cleanup; + + opts->retprobe = false; + link3 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_sleep, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + opts->retprobe = true; + link4 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_sleep, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) + goto cleanup; + + uprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link4); + bpf_link__destroy(link3); + bpf_link__destroy(link2); + bpf_link__destroy(link1); + uprobe_multi__destroy(skel); +} + +static void test_attach_api_pattern(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + + test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts); + test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts); +} + +static void test_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + test_attach_api("/proc/self/exe", NULL, &opts); +} + void test_uprobe_multi_test(void) { if (test__start_subtest("skel_api")) test_skel_api(); + if (test__start_subtest("attach_api_pattern")) + test_attach_api_pattern(); + if (test__start_subtest("attach_api_syms")) + test_attach_api_syms(); } From patchwork Fri Jun 30 08:33:37 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297798 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4E1FAA932 for ; Fri, 30 Jun 2023 08:37:36 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id B7DCFC433CC; Fri, 30 Jun 2023 08:37:31 +0000 (UTC) 
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114256; bh=IoOBhwY1eF6Gv08BSOfriLcg7VCmDxonftgpfGo4Dvo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=AJ1UmnOXv5AlHtJGMw4C01T6b1uwKgasCp75cm1KFOGthPeX+isH6aMW+1cuk8sQb a9Y3ZITT0gqVGqWU3A2Yj8Pi0qGCU44+9F5uDc9ZXnRO1XOMRgBbiftBHkAJTP0aCU ZSZSIDSe71nfc9H+izpYeoMndTyW1qYJU+wO/DXVYT6iKJd5KnR4B1RWnNRBK5j2tX IhDzz9SeAiIifYen25ZjIjA4SdgALZ4s2iftj4at262TU5SDMAtPuAdYsNBO9UEYOL 2g7z0UM08ZdklWfrsfaAuf6rwFf0qUnIxdEPtJEWvhAKeGtcG7+Zj5Z8vYUTQMhKXm SMftd8IB+sB1g== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 19/26] selftests/bpf: Add uprobe_multi link test Date: Fri, 30 Jun 2023 10:33:37 +0200 Message-ID: <20230630083344.984305-20-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding uprobe_multi test for bpf_link_create attach function. Testing attachment using the struct bpf_link_create_opts. Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index f97a68871e73..fd858636b8b0 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -3,6 +3,7 @@ #include #include #include "uprobe_multi.skel.h" +#include "bpf/libbpf_elf.h" static char test_data[] = "test_data"; @@ -136,6 +137,71 @@ static void test_attach_api_syms(void) test_attach_api("/proc/self/exe", NULL, &opts); } +static void test_link_api(void) +{ + int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1; + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *path = "/proc/self/exe"; + struct uprobe_multi *skel = NULL; + unsigned long *offsets = NULL; + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + int err; + + err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets")) + return; + + opts.uprobe_multi.path = path; + opts.uprobe_multi.offsets = offsets; + opts.uprobe_multi.cnt = ARRAY_SIZE(syms); + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + prog_fd = bpf_program__fd(skel->progs.test_uprobe); + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link1_fd")) + goto cleanup; + + opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_uretprobe); + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link2_fd")) + goto cleanup; + + opts.kprobe_multi.flags = 0; + prog_fd = bpf_program__fd(skel->progs.test_uprobe_sleep); + link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link3_fd")) + goto cleanup; + + opts.kprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN; + prog_fd = 
bpf_program__fd(skel->progs.test_uretprobe_sleep); + link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link4_fd")) + goto cleanup; + uprobe_multi_test_run(skel); + +cleanup: + if (link1_fd >= 0) + close(link1_fd); + if (link2_fd >= 0) + close(link2_fd); + if (link3_fd >= 0) + close(link3_fd); + if (link4_fd >= 0) + close(link4_fd); + + uprobe_multi__destroy(skel); + free(offsets); +} + void test_uprobe_multi_test(void) { if (test__start_subtest("skel_api")) @@ -144,4 +210,6 @@ void test_uprobe_multi_test(void) test_attach_api_pattern(); if (test__start_subtest("attach_api_syms")) test_attach_api_syms(); + if (test__start_subtest("link_api")) + test_link_api(); } From patchwork Fri Jun 30 08:33:38 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297799 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A1848A932 for ; Fri, 30 Jun 2023 08:37:48 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 50E87C433C0; Fri, 30 Jun 2023 08:37:43 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114268; bh=uQYpmbKAmDCIi1un+cBvbzpAOhvrbixTe8hrpSWPGSg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=uy/90XVKsIV85oBKTE4jNuiIfwdpbbRRkXJq+BezNJTZcqNogSR4MyuAghF1xY1wn uESlAA0y5p6DIqLS3IRBAOZgUwCRTxIRtzuVDWx/TEZShBozaOWxfdUf8b+eNfIvMj Df9/htoheITht/ucmCm3sWbGELLTo5RxbiGaSJ0co2V+VW05w0VXb0tJ5OjnK/r7fM VJ64qxdqabvzGiDy3fjvalfn+8Nh3Vwy0WKsATVE0iL53FozAezFGkaBbNPPyqr7Gv Rua1+jlLUyPotvqjErdhJqSCIA2nqnZ8Wm2rjo2AFQ0dT+cEARRdyUi9cNTHAidiyy IDBpYFJMazFiA== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 20/26] selftests/bpf: Add uprobe_multi test program Date: Fri, 30 Jun 2023 10:33:38 +0200 Message-ID: <20230630083344.984305-21-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding uprobe_multi test program that defines 50k uprobe_multi_func_* functions and will serve as attach point for uprobe_multi bench test in following patch. 
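The 50k attach points are produced by recursive token pasting; as a reduced illustration of the same mechanism used in the patch (one nesting level instead of four):

    F10(DEF, uprobe_multi_func_, 0)
    /* expands to:
     *   int uprobe_multi_func_00(void) { return 0; }
     *   ...
     *   int uprobe_multi_func_09(void) { return 0; }
     *
     * F10000() nests this four levels deep, so the five F10000(DEF, ...) blocks
     * below define uprobe_multi_func_00000 .. uprobe_multi_func_49999.
     */
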
Signed-off-by: Jiri Olsa --- tools/testing/selftests/bpf/Makefile | 5 ++ tools/testing/selftests/bpf/uprobe_multi.c | 53 ++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 tools/testing/selftests/bpf/uprobe_multi.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index ad6b585e0d7c..acf7c9a29082 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -567,6 +567,7 @@ TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ $(OUTPUT)/sign-file \ + $(OUTPUT)/uprobe_multi \ ima_setup.sh \ verify_sig_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) \ @@ -670,6 +671,10 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ +$(OUTPUT)/uprobe_multi: uprobe_multi.c + $(call msg,BINARY,,$@) + $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ + EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ feature bpftool \ diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c new file mode 100644 index 000000000000..115a7f6cebfa --- /dev/null +++ b/tools/testing/selftests/bpf/uprobe_multi.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#define __PASTE(a, b) a##b +#define PASTE(a, b) __PASTE(a, b) + +#define NAME(name, idx) PASTE(name, idx) + +#define DEF(name, idx) int NAME(name, idx)(void) { return 0; } +#define CALL(name, idx) NAME(name, idx)(); + +#define F(body, name, idx) body(name, idx) + +#define F10(body, name, idx) \ + F(body, PASTE(name, idx), 0) F(body, PASTE(name, idx), 1) F(body, PASTE(name, idx), 2) \ + F(body, PASTE(name, idx), 3) F(body, PASTE(name, idx), 4) F(body, PASTE(name, idx), 5) \ + F(body, PASTE(name, idx), 6) F(body, PASTE(name, idx), 7) F(body, PASTE(name, idx), 8) \ + F(body, PASTE(name, idx), 9) + +#define F100(body, name, idx) \ + F10(body, PASTE(name, idx), 0) F10(body, PASTE(name, idx), 1) F10(body, PASTE(name, idx), 2) \ + F10(body, PASTE(name, idx), 3) F10(body, PASTE(name, idx), 4) F10(body, PASTE(name, idx), 5) \ + F10(body, PASTE(name, idx), 6) F10(body, PASTE(name, idx), 7) F10(body, PASTE(name, idx), 8) \ + F10(body, PASTE(name, idx), 9) + +#define F1000(body, name, idx) \ + F100(body, PASTE(name, idx), 0) F100(body, PASTE(name, idx), 1) F100(body, PASTE(name, idx), 2) \ + F100(body, PASTE(name, idx), 3) F100(body, PASTE(name, idx), 4) F100(body, PASTE(name, idx), 5) \ + F100(body, PASTE(name, idx), 6) F100(body, PASTE(name, idx), 7) F100(body, PASTE(name, idx), 8) \ + F100(body, PASTE(name, idx), 9) + +#define F10000(body, name, idx) \ + F1000(body, PASTE(name, idx), 0) F1000(body, PASTE(name, idx), 1) F1000(body, PASTE(name, idx), 2) \ + F1000(body, PASTE(name, idx), 3) F1000(body, PASTE(name, idx), 4) F1000(body, PASTE(name, idx), 5) \ + F1000(body, PASTE(name, idx), 6) F1000(body, PASTE(name, idx), 7) F1000(body, PASTE(name, idx), 8) \ + F1000(body, PASTE(name, idx), 9) + +F10000(DEF, uprobe_multi_func_, 0) +F10000(DEF, uprobe_multi_func_, 1) +F10000(DEF, uprobe_multi_func_, 2) +F10000(DEF, uprobe_multi_func_, 3) +F10000(DEF, uprobe_multi_func_, 4) + +int main(void) +{ + F10000(CALL, uprobe_multi_func_, 0) + F10000(CALL, uprobe_multi_func_, 1) + F10000(CALL, uprobe_multi_func_, 2) + F10000(CALL, uprobe_multi_func_, 3) + F10000(CALL, uprobe_multi_func_, 4) + return 0; 
+} From patchwork Fri Jun 30 08:33:39 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297800 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BF96BA932 for ; Fri, 30 Jun 2023 08:37:59 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id CE567C433C8; Fri, 30 Jun 2023 08:37:55 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114279; bh=/x1YugDAnJqeKbdO2ZzXsemQhKAviiV9T8VjqNeHZik=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=LoKtFMa+FYVnGm4GJwxl3DgSYsXseNC+gz5URlf9jffGvqHmCP4JNxbpv5IuR2nQa tC0/+mcDhIAuCgw5nKsbylYZp4zjeEDfAzoFOstotyXs5gnDWZ2HPPVSjb8n21+O79 qjZOWA+HI91Y3MI6+lC3mHmjBdwXzkAgyrtPu4IDVjF8BG2hO8biV651noCK1qLVll RXTmZxGWzkmidwTI5kMbv27+LfY0dZDwUGu1Z30xNpjoIQ/80R+VRwfh9+Uz3/6OiJ Wn39YLVuP3e1tluOztqkZV0+t78zjHlkh5I/vbQoEp4FLX2Vw1Z9OwDPePpIncXRSA +ZGoGBX/qE/Kg== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 21/26] selftests/bpf: Add uprobe_multi bench test Date: Fri, 30 Jun 2023 10:33:39 +0200 Message-ID: <20230630083344.984305-22-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding test that attaches 50k uprobes in uprobe_multi binary. After the attach is done we run the binary and make sure we get proper amount of hits. 
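The benchmark boils down to timestamping attach/detach with CLOCK_MONOTONIC and checking that every generated function fired exactly once; condensed, the test below does:

    attach_start_ns = get_time_ns();      /* clock_gettime(CLOCK_MONOTONIC) wrapper */
    err = uprobe_multi__attach(skel);     /* one uprobe_multi link covering 50k offsets */
    attach_end_ns = get_time_ns();

    system("./uprobe_multi");             /* calls every uprobe_multi_func_* once */
    ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");

Once in the tree, the subtest should be runnable standalone with the usual test_progs name filter, something like ./test_progs -t uprobe_multi_test/bench_uprobe.
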
Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 56 +++++++++++++++++++ .../selftests/bpf/progs/uprobe_multi.c | 9 +++ 2 files changed, 65 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index fd858636b8b0..547d46965d70 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -202,6 +202,60 @@ static void test_link_api(void) free(offsets); } +static inline __u64 get_time_ns(void) +{ + struct timespec t; + + clock_gettime(CLOCK_MONOTONIC, &t); + return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; +} + +static void test_bench_attach_uprobe(void) +{ + long attach_start_ns, attach_end_ns; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + struct uprobe_multi *skel = NULL; + struct bpf_program *prog; + int err; + + skel = uprobe_multi__open(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open")) + goto cleanup; + + bpf_object__for_each_program(prog, skel->obj) + bpf_program__set_autoload(prog, false); + + bpf_program__set_autoload(skel->progs.test_uprobe_bench, true); + + err = uprobe_multi__load(skel); + if (!ASSERT_EQ(err, 0, "uprobe_multi__load")) + goto cleanup; + + attach_start_ns = get_time_ns(); + + err = uprobe_multi__attach(skel); + if (!ASSERT_OK(err, "uprobe_multi__attach")) + goto cleanup; + + attach_end_ns = get_time_ns(); + + system("./uprobe_multi"); + + ASSERT_EQ(skel->bss->count, 50000, "uprobes_count"); + +cleanup: + detach_start_ns = get_time_ns(); + uprobe_multi__destroy(skel); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + void test_uprobe_multi_test(void) { if (test__start_subtest("skel_api")) @@ -212,4 +266,6 @@ void test_uprobe_multi_test(void) test_attach_api_syms(); if (test__start_subtest("link_api")) test_link_api(); + if (test__start_subtest("bench_uprobe")) + test_bench_attach_uprobe(); } diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c index 1eeb9b7b9cad..cd73139dc881 100644 --- a/tools/testing/selftests/bpf/progs/uprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c @@ -89,3 +89,12 @@ int test_uretprobe_sleep(struct pt_regs *ctx) uprobe_multi_check(ctx, true, true); return 0; } + +int count; + +SEC("?uprobe.multi/./uprobe_multi:uprobe_multi_func_*") +int test_uprobe_bench(struct pt_regs *ctx) +{ + count++; + return 0; +} From patchwork Fri Jun 30 08:33:40 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297801 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0985FA932 for ; Fri, 30 Jun 2023 08:38:11 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 6AD29C433C9; Fri, 30 Jun 2023 08:38:07 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114290; bh=8XYMbyO/MfneBvQhKY3HqR1MbYmNn9b6tydiDqDpGrE=; 
h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=V65zLYQnBhOkJ0lcGS7S4YWFEV4CIzv7EZCd/N+3liW/xX6sLFtaABR7oix8MhKCQ 8Is9WO6qTBsXxoBOLk3Gn+75tlBmCySPmRSE1Z279OqCVIira1qeJI/CAhaIYusJsb B6vltQsNYK19AdyY8tU4R/CfJDUJK7KCVOrQp2BGRM8wNXGpp+yC4LsJLHQhOtbNtR LGl8JgSTJCRwZ6cVxxKjFfkFB5l6SgEFsZT4nknc7waXVDGTqM9XxKr+Wa35hILGe9 hbyx1PtoqyEtsUftXi3ls3iBzLP2pgDuNHGxIXw1gqcDlI+BTvjgAQAEgy73O2wgOK yxRitmXbHaxQQ== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 22/26] selftests/bpf: Add usdt_multi test program Date: Fri, 30 Jun 2023 10:33:40 +0200 Message-ID: <20230630083344.984305-23-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding usdt_multi test program that defines 50k usdts and will serve as attach point for uprobe_multi usdt bench test in following patch. Signed-off-by: Jiri Olsa --- tools/testing/selftests/bpf/Makefile | 5 +++++ tools/testing/selftests/bpf/usdt_multi.c | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 tools/testing/selftests/bpf/usdt_multi.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index acf7c9a29082..9762467cf0ba 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -568,6 +568,7 @@ TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/xdp_synproxy \ $(OUTPUT)/sign-file \ $(OUTPUT)/uprobe_multi \ + $(OUTPUT)/usdt_multi \ ima_setup.sh \ verify_sig_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) \ @@ -675,6 +676,10 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ +$(OUTPUT)/usdt_multi: usdt_multi.c + $(call msg,BINARY,,$@) + $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ + EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ feature bpftool \ diff --git a/tools/testing/selftests/bpf/usdt_multi.c b/tools/testing/selftests/bpf/usdt_multi.c new file mode 100644 index 000000000000..fedf856bad2b --- /dev/null +++ b/tools/testing/selftests/bpf/usdt_multi.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#define PROBE STAP_PROBE(test, usdt); + +#define PROBE10 PROBE PROBE PROBE PROBE PROBE \ + PROBE PROBE PROBE PROBE PROBE +#define PROBE100 PROBE10 PROBE10 PROBE10 PROBE10 PROBE10 \ + PROBE10 PROBE10 PROBE10 PROBE10 PROBE10 +#define PROBE1000 PROBE100 PROBE100 PROBE100 PROBE100 PROBE100 \ + PROBE100 PROBE100 PROBE100 PROBE100 PROBE100 +#define PROBE10000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 \ + PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 + +int main(void) +{ + PROBE10000 + PROBE10000 + PROBE10000 + PROBE10000 + PROBE10000 + return 0; +} From patchwork Fri Jun 30 08:33:41 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297802 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher 
ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id EAD39A932 for ; Fri, 30 Jun 2023 08:38:22 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id DC20BC433C8; Fri, 30 Jun 2023 08:38:18 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114302; bh=uVaZqtTicYF9UZO+qN7Vxxu5txzKyAnr1YysTA5DfZw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=PovQMB0FIVZ9i43x4cLF4zahnSdSlJa+iyTqqCoOmylQDucSQPPrWRcC7yrkXiO1l aIetwLp52dvT43RZd6PJUk4gu1PJoDAbf+FkMc3OlZ7Ta/5wbEKYVMYycuPk9Bc5n6 9zeS73+ULuuGudPaEaNWQa30SpwHY7ipzgd0hnPGTq+RPhjXcnxONSjE3E7Ulc4Qp2 70QQpMUO3egcnsvKuYRoOAN6bJtVAVmybTrBsYED7miEWybgVlqQOUyJr+7vO499fu Zwe/kVyCgycWdxBjogErC8Gn4VDC46AnpVm6HgsL8GtLuUqkOzhuXyi5TISAD2wXB0 LZD1r1w1KdBhQ== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 23/26] selftests/bpf: Add usdt_multi bench test Date: Fri, 30 Jun 2023 10:33:41 +0200 Message-ID: <20230630083344.984305-24-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding test that attaches 50k usdt probes in usdt_multi binary. After the attach is done we run the binary and make sure we get proper amount of hits. With current uprobes: # perf stat --null ./test_progs -n 254/6 #254/6 uprobe_multi_test/bench_usdt:OK #254 uprobe_multi_test:OK Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED Performance counter stats for './test_progs -n 254/6': 1353.659680562 seconds time elapsed With uprobe_multi link: # perf stat --null ./test_progs -n 254/6 #254/6 uprobe_multi_test/bench_usdt:OK #254 uprobe_multi_test:OK Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED Performance counter stats for './test_progs -n 254/6': 0.322046364 seconds time elapsed Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 50 +++++++++++++++++++ .../selftests/bpf/progs/uprobe_multi_usdt.c | 16 ++++++ 2 files changed, 66 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index 547d46965d70..b12dc1f992e5 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -4,6 +4,7 @@ #include #include "uprobe_multi.skel.h" #include "bpf/libbpf_elf.h" +#include "uprobe_multi_usdt.skel.h" static char test_data[] = "test_data"; @@ -256,6 +257,53 @@ static void test_bench_attach_uprobe(void) printf("%s: detached in %7.3lfs\n", __func__, detach_delta); } +static void test_bench_attach_usdt(void) +{ + struct uprobe_multi_usdt *skel = NULL; + long attach_start_ns, attach_end_ns; + long detach_start_ns, detach_end_ns; + double attach_delta, detach_delta; + struct bpf_program *prog; + int err; + + skel = uprobe_multi_usdt__open(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi__open")) + goto cleanup; + + bpf_object__for_each_program(prog, skel->obj) + bpf_program__set_autoload(prog, false); + + bpf_program__set_autoload(skel->progs.usdt0, true); + + err = 
uprobe_multi_usdt__load(skel); + if (!ASSERT_EQ(err, 0, "uprobe_multi_usdt__load")) + goto cleanup; + + attach_start_ns = get_time_ns(); + + skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./usdt_multi", + "test", "usdt", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt")) + goto cleanup; + + attach_end_ns = get_time_ns(); + + system("./usdt_multi"); + + ASSERT_EQ(skel->bss->count, 50000, "usdt_count"); + +cleanup: + detach_start_ns = get_time_ns(); + uprobe_multi_usdt__destroy(skel); + detach_end_ns = get_time_ns(); + + attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0; + detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0; + + printf("%s: attached in %7.3lfs\n", __func__, attach_delta); + printf("%s: detached in %7.3lfs\n", __func__, detach_delta); +} + void test_uprobe_multi_test(void) { if (test__start_subtest("skel_api")) @@ -268,4 +316,6 @@ void test_uprobe_multi_test(void) test_link_api(); if (test__start_subtest("bench_uprobe")) test_bench_attach_uprobe(); + if (test__start_subtest("bench_usdt")) + test_bench_attach_usdt(); } diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c new file mode 100644 index 000000000000..9e1c33d0bd2f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +int count; + +SEC("usdt") +int usdt0(struct pt_regs *ctx) +{ + count++; + return 0; +} From patchwork Fri Jun 30 08:33:42 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297803 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BE0A6A932 for ; Fri, 30 Jun 2023 08:38:34 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id C5000C433C0; Fri, 30 Jun 2023 08:38:30 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114314; bh=6SXTWVOLI7wqDLoX13ipktDRhQL8JTRXvac7QW5dSm4=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=oV2qcfsS4GEVBl+dIYupU1tPX3rex6Od4TbhHzFiaUpxhE2KZlBgf09zCsOBUzi1f nt5ZNv3cQgPt4riCfRk/eujiYQJTGjww9Fa6o25LVJUUHvy6Ihnm0EmsXiFaN1GL9i X7wqXQOca8YVPo1xXHOStWkc/hzuhBpnE++wqgQkJPgdeXu3HRzbi1CPD2rFKZQyxI 26H5NgqXF8ymdA2sdyPcgMo6SwGextb40Sj7TprxM4idJfsBLSPNyGqj60irenht4f NF6mIWm37QF7PpiuwPtrdto/JXP/tmAbnDUX6HZwRulw0oE+bI+oF77JDvPhmQ5JkQ 0QX5XZ4lBOMhg== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 24/26] selftests/bpf: Add uprobe_multi cookie test Date: Fri, 30 Jun 2023 10:33:42 +0200 Message-ID: <20230630083344.984305-25-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Adding test for cookies setup/retrieval in uprobe_link uprobes and making sure 
bpf_get_attach_cookie works properly. Signed-off-by: Jiri Olsa --- .../selftests/bpf/prog_tests/bpf_cookie.c | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 26b2d1bffdfd..4162d1724b79 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -11,6 +11,7 @@ #include #include "test_bpf_cookie.skel.h" #include "kprobe_multi.skel.h" +#include "uprobe_multi.skel.h" /* uprobe attach point */ static noinline void trigger_func(void) @@ -239,6 +240,81 @@ static void kprobe_multi_attach_api_subtest(void) bpf_link__destroy(link1); kprobe_multi__destroy(skel); } + +/* defined in prog_tests/uprobe_multi_test.c */ +void uprobe_multi_func_1(void); +void uprobe_multi_func_2(void); +void uprobe_multi_func_3(void); + +static void uprobe_multi_test_run(struct uprobe_multi *skel) +{ + skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; + skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; + skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result"); + + ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result"); + ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result"); +} + +static void uprobe_multi_attach_api_subtest(void) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct uprobe_multi *skel = NULL; + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); + const char *syms[3] = { + "uprobe_multi_func_1", + "uprobe_multi_func_2", + "uprobe_multi_func_3", + }; + __u64 cookies[3]; + + cookies[0] = 3; /* uprobe_multi_func_1 */ + cookies[1] = 1; /* uprobe_multi_func_2 */ + cookies[2] = 2; /* uprobe_multi_func_3 */ + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = &cookies[0]; + + skel = uprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_multi")) + goto cleanup; + + link1 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + cookies[0] = 2; /* uprobe_multi_func_1 */ + cookies[1] = 3; /* uprobe_multi_func_2 */ + cookies[2] = 1; /* uprobe_multi_func_3 */ + + opts.retprobe = true; + link2 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe, -1, + "/proc/self/exe", NULL, &opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) + goto cleanup; + + uprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + uprobe_multi__destroy(skel); +} + static void uprobe_subtest(struct test_bpf_cookie *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); @@ -515,6 +591,8 @@ void test_bpf_cookie(void) kprobe_multi_attach_api_subtest(); if (test__start_subtest("uprobe")) uprobe_subtest(skel); + if (test__start_subtest("multi_uprobe_attach_api")) + uprobe_multi_attach_api_subtest(); if (test__start_subtest("tracepoint")) 
tp_subtest(skel); if (test__start_subtest("perf_event")) From patchwork Fri Jun 30 08:33:43 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297804 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 900311FB8 for ; Fri, 30 Jun 2023 08:38:46 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A2B6BC433C8; Fri, 30 Jun 2023 08:38:42 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114326; bh=iktlgaPzL5DrafXvZV0rtaI87SPZfkpzn0ahDWy2gcg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=t+MTW8LBfSQxyETmcansloAltvhRoYVA8CqQ8D7edcVAzge5/7lSPFC8SLGzv5EEF x1/ljcOr6NUv3uMaH9tQjzhqz/GmqEnjMYmYsJego7k5TsPluFKRdyEL4OFsN2mWoh pXEu/hqg6/sOAeV7yX10M1jJp4D5+7XK8h7cZntdFZupL5ulcnNSz3A+nQIaUw89ZZ qQJGfcNOJv0qAI27+W5uh8S7OCCPMYG0b9MIg+3bqx+2dFFA/dCNhQxy4trPO5P4Mz 9jUKjrlzJwqduNkf6ydCbZOY5DTC7015iSFCATBXOCSesV+6mDpq+dRDxQskFLuWAP d6tzDAqxePloA== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 25/26] selftests/bpf: Add uprobe_multi pid filter tests Date: Fri, 30 Jun 2023 10:33:43 +0200 Message-ID: <20230630083344.984305-26-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Running api and link tests also with pid filter and checking the probe gets executed only for specific pid. Spawning extra process to trigger attached uprobes and checking we get correct counts from executed programs. 
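The child is synchronized over a pipe so the probes are only triggered once the links are in place; stripped of error handling, the handshake used below is roughly:

    int go[2];
    char c = 1;

    pipe(go);
    pid = fork();
    if (pid == 0) {
            /* child: wait for the parent to finish attaching, then trigger */
            read(go[0], &c, 1);
            uprobe_multi_func_1();
            uprobe_multi_func_2();
            uprobe_multi_func_3();
            exit(0);
    }
    /* parent: attach with the uprobe_multi pid filter set to the child's pid,
     * then kick the child and reap it
     */
    write(go[1], &c, 1);
    waitpid(pid, NULL, 0);
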
Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 131 ++++++++++++++++-- .../selftests/bpf/progs/uprobe_multi.c | 6 +- 2 files changed, 125 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index b12dc1f992e5..8ca7a45e21e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -23,14 +23,86 @@ noinline void uprobe_multi_func_3(void) asm volatile (""); } -static void uprobe_multi_test_run(struct uprobe_multi *skel) +struct child { + int go[2]; + int pid; +}; + +static void release_child(struct child *child) +{ + int child_status; + + if (!child) + return; + close(child->go[1]); + close(child->go[0]); + if (child->pid > 0) + waitpid(child->pid, &child_status, 0); +} + +static void kick_child(struct child *child) +{ + char c = 1; + + if (child) { + write(child->go[1], &c, 1); + release_child(child); + } + fflush(NULL); +} + +static struct child *spawn_child(void) +{ + static struct child child; + int err; + int c; + + /* pid filter */ + if (!ASSERT_OK(pipe(child.go), "pipe")) + return NULL; + + child.pid = fork(); + if (child.pid < 0) { + release_child(&child); + return NULL; + } + + /* child */ + if (child.pid == 0) { + close(child.go[1]); + fflush(NULL); + /* wait for parent's kick */ + err = read(child.go[0], &c, 1); + if (!ASSERT_EQ(err, 1, "child_read_pipe")) + exit(err); + + uprobe_multi_func_1(); + uprobe_multi_func_2(); + uprobe_multi_func_3(); + + exit(errno); + } + + return &child; +} + +static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child) { skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1; skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2; skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3; skel->bss->user_ptr = test_data; - skel->bss->pid = getpid(); + + /* + * Disable pid check in bpf program if we are pid filter test, + * because the probe should be executed only by child->pid + * passed at the probe attach. + */ + skel->bss->pid = child ? 0 : getpid(); + + if (child) + kick_child(child); /* trigger all probes */ uprobe_multi_func_1(); @@ -50,6 +122,9 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel) ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result"); ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result"); + + if (child) + ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid"); } static void test_skel_api(void) @@ -65,17 +140,19 @@ static void test_skel_api(void) if (!ASSERT_OK(err, "uprobe_multi__attach")) goto cleanup; - uprobe_multi_test_run(skel); + uprobe_multi_test_run(skel, NULL); cleanup: uprobe_multi__destroy(skel); } static void -test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) +__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts, + struct child *child) { struct bpf_link *link1 = NULL, *link2 = NULL; struct bpf_link *link3 = NULL, *link4 = NULL; + pid_t pid = child ? 
child->pid : -1; struct uprobe_multi *skel = NULL; skel = uprobe_multi__open_and_load(); @@ -83,30 +160,30 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi goto cleanup; opts->retprobe = false; - link1 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe, -1, + link1 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe, pid, binary, pattern, opts); if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) goto cleanup; opts->retprobe = true; - link2 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe, -1, + link2 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe, pid, binary, pattern, opts); if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) goto cleanup; opts->retprobe = false; - link3 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_sleep, -1, + link3 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_sleep, pid, binary, pattern, opts); if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi")) goto cleanup; opts->retprobe = true; - link4 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_sleep, -1, + link4 = bpf_program__attach_uprobe_multi(skel->progs.test_uretprobe_sleep, pid, binary, pattern, opts); if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) goto cleanup; - uprobe_multi_test_run(skel); + uprobe_multi_test_run(skel, child); cleanup: bpf_link__destroy(link4); @@ -116,6 +193,22 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi uprobe_multi__destroy(skel); } +static void +test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts) +{ + struct child *child; + + /* no pid filter */ + __test_attach_api(binary, pattern, opts, NULL); + + /* pid filter */ + child = spawn_child(); + if (!child) + return; + + __test_attach_api(binary, pattern, opts, child); +} + static void test_attach_api_pattern(void) { LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); @@ -138,7 +231,7 @@ static void test_attach_api_syms(void) test_attach_api("/proc/self/exe", NULL, &opts); } -static void test_link_api(void) +static void __test_link_api(struct child *child) { int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1; LIBBPF_OPTS(bpf_link_create_opts, opts); @@ -159,6 +252,7 @@ static void test_link_api(void) opts.uprobe_multi.path = path; opts.uprobe_multi.offsets = offsets; opts.uprobe_multi.cnt = ARRAY_SIZE(syms); + opts.uprobe_multi.pid = child ? 
child->pid : 0; skel = uprobe_multi__open_and_load(); if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load")) @@ -187,7 +281,7 @@ static void test_link_api(void) link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); if (!ASSERT_GE(link2_fd, 0, "link4_fd")) goto cleanup; - uprobe_multi_test_run(skel); + uprobe_multi_test_run(skel, child); cleanup: if (link1_fd >= 0) @@ -203,6 +297,21 @@ static void test_link_api(void) free(offsets); } +void test_link_api(void) +{ + struct child *child; + + /* no pid filter */ + __test_link_api(NULL); + + /* pid filter */ + child = spawn_child(); + if (!child) + return; + + __test_link_api(child); +} + static inline __u64 get_time_ns(void) { struct timespec t; diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c index cd73139dc881..1086312b5d1e 100644 --- a/tools/testing/selftests/bpf/progs/uprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c @@ -21,6 +21,8 @@ __u64 uretprobe_multi_func_3_result = 0; __u64 uprobe_multi_sleep_result = 0; int pid = 0; +int child_pid = 0; + bool test_cookie = false; void *user_ptr = 0; @@ -34,7 +36,9 @@ static __always_inline bool verify_sleepable_user_copy(void) static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep) { - if (bpf_get_current_pid_tgid() >> 32 != pid) + child_pid = bpf_get_current_pid_tgid() >> 32; + + if (pid && child_pid != pid) return; __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0; From patchwork Fri Jun 30 08:33:44 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jiri Olsa X-Patchwork-Id: 13297805 X-Patchwork-Delegate: bpf@iogearbox.net Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id BB8AE1FB8 for ; Fri, 30 Jun 2023 08:38:58 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 6EC1FC433CA; Fri, 30 Jun 2023 08:38:54 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1688114338; bh=0Wo4wjjYHpGWo2l5Nll/aWbzb19c+GyKhDECdbc5v9w=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=QHsp/WoaugcWOsqv6DJZ57ClmdaUuQhFddvmMqvDJ9gcd096X7Rq5Qnw5TrnNiUc5 370rENsrVPRYZIvvljsGWoIugHxYQTAGLdrdR2cD1fm8UwgHNwXSZXxcfstZO2DeNw yG9yHos1vqmYKRY1PNWDaFuQNUVLVYDkaTa2KUglA24liPtO2WBYCKlzOGPUlPWS3Y 8MMxEtl+PDREa7tgpZXXyPfWNTNBh0hiI5Xswi/MJ47gRUqcwdLuMbpDU7cNbJJS7I Atlen6t7opaf6scTewYb9RfL72c6NT7Tb7WUGac4PGCaEaAM+2txnqCXTCYUU+IKLW VNsYkmwwHny+A== From: Jiri Olsa To: Alexei Starovoitov , Daniel Borkmann , Andrii Nakryiko Cc: bpf@vger.kernel.org, Martin KaFai Lau , Song Liu , Yonghong Song , John Fastabend , KP Singh , Stanislav Fomichev , Hao Luo Subject: [PATCHv3 bpf-next 26/26] selftests/bpf: Add extra link to uprobe_multi tests Date: Fri, 30 Jun 2023 10:33:44 +0200 Message-ID: <20230630083344.984305-27-jolsa@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: <20230630083344.984305-1-jolsa@kernel.org> References: <20230630083344.984305-1-jolsa@kernel.org> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Patchwork-Delegate: bpf@iogearbox.net Attaching extra program to same functions system wide for api and link tests. 
This way we can test the pid filter works properly when there's extra system wide consumer on the same uprobe that will trigger the original uprobe handler. We expect to have the same counts as before. Signed-off-by: Jiri Olsa --- .../bpf/prog_tests/uprobe_multi_test.c | 19 +++++++++++++++++++ .../selftests/bpf/progs/uprobe_multi.c | 6 ++++++ 2 files changed, 25 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c index 8ca7a45e21e6..39bd16786cd0 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c @@ -153,6 +153,7 @@ __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_mul struct bpf_link *link1 = NULL, *link2 = NULL; struct bpf_link *link3 = NULL, *link4 = NULL; pid_t pid = child ? child->pid : -1; + struct bpf_link *link_extra = NULL; struct uprobe_multi *skel = NULL; skel = uprobe_multi__open_and_load(); @@ -183,9 +184,16 @@ __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_mul if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe")) goto cleanup; + opts->retprobe = false; + link_extra = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_extra, -1, + binary, pattern, opts); + if (!ASSERT_OK_PTR(link_extra, "bpf_program__attach_uprobe_multi")) + goto cleanup; + uprobe_multi_test_run(skel, child); cleanup: + bpf_link__destroy(link_extra); bpf_link__destroy(link4); bpf_link__destroy(link3); bpf_link__destroy(link2); @@ -243,6 +251,7 @@ static void __test_link_api(struct child *child) "uprobe_multi_func_2", "uprobe_multi_func_3", }; + int link_extra_fd = -1; int err; err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets); @@ -281,6 +290,14 @@ static void __test_link_api(struct child *child) link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); if (!ASSERT_GE(link2_fd, 0, "link4_fd")) goto cleanup; + + opts.kprobe_multi.flags = 0; + opts.uprobe_multi.pid = 0; + prog_fd = bpf_program__fd(skel->progs.test_uprobe_extra); + link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts); + if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd")) + goto cleanup; + uprobe_multi_test_run(skel, child); cleanup: @@ -292,6 +309,8 @@ static void __test_link_api(struct child *child) close(link3_fd); if (link4_fd >= 0) close(link4_fd); + if (link_extra_fd >= 0) + close(link_extra_fd); uprobe_multi__destroy(skel); free(offsets); diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c index 1086312b5d1e..8ee957670303 100644 --- a/tools/testing/selftests/bpf/progs/uprobe_multi.c +++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c @@ -102,3 +102,9 @@ int test_uprobe_bench(struct pt_regs *ctx) count++; return 0; } + +SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*") +int test_uprobe_extra(struct pt_regs *ctx) +{ + return 0; +}
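In short, the resulting setup for the api/link tests is two consumers on the same symbols: the filtered programs attach with the spawned child's pid while the extra program attaches system wide, and the counters checked by uprobe_multi_test_run() must come out the same as without the extra link. Illustratively:

    /* filtered consumer: should fire only in the spawned child */
    link1 = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe, child->pid,
                                             binary, pattern, opts);

    /* extra system-wide consumer on the very same uprobes */
    link_extra = bpf_program__attach_uprobe_multi(skel->progs.test_uprobe_extra, -1,
                                                  binary, pattern, opts);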