From patchwork Thu Nov 9 09:28:33 2023
From: Jiri Olsa
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: Song Liu, bpf@vger.kernel.org, Martin KaFai Lau, Song Liu, Yonghong Song,
    John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Yafang Shao
Subject: [PATCHv2 bpf-next 1/6] libbpf: Add st_type argument to elf_resolve_syms_offsets function
Date: Thu, 9 Nov 2023 10:28:33 +0100
Message-ID: <20231109092838.721233-2-jolsa@kernel.org>

We need to get offsets for static variables in the following changes,
so make elf_resolve_syms_offsets take an st_type value as an argument
and pass it to elf_sym_iter_new.

Acked-by: Song Liu
Signed-off-by: Jiri Olsa
Acked-by: Andrii Nakryiko
---
 tools/lib/bpf/elf.c                                        | 5 +++--
 tools/lib/bpf/libbpf.c                                     | 2 +-
 tools/lib/bpf/libbpf_internal.h                            | 3 ++-
 tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c | 2 +-
 4 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index 2a62bf411bb3..b02faec748a5 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -407,7 +407,8 @@ static int symbol_cmp(const void *a, const void *b)
  * size, that needs to be released by the caller.
  */
 int elf_resolve_syms_offsets(const char *binary_path, int cnt,
-			     const char **syms, unsigned long **poffsets)
+			     const char **syms, unsigned long **poffsets,
+			     int st_type)
 {
 	int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
 	int err = 0, i, cnt_done = 0;
@@ -438,7 +439,7 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt,
 		struct elf_sym_iter iter;
 		struct elf_sym *sym;
 
-		err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
+		err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], st_type);
 		if (err == -ENOENT)
 			continue;
 		if (err)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e067be95da3c..ea9b8158c20d 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -11447,7 +11447,7 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
 			return libbpf_err_ptr(err);
 		offsets = resolved_offsets;
 	} else if (syms) {
-		err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets);
+		err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
 		if (err < 0)
 			return libbpf_err_ptr(err);
 		offsets = resolved_offsets;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index f0f08635adb0..b5d334754e5d 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -594,7 +594,8 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd);
 void elf_close(struct elf_fd *elf_fd);
 
 int elf_resolve_syms_offsets(const char *binary_path, int cnt,
-			     const char **syms, unsigned long **poffsets);
+			     const char **syms, unsigned long **poffsets,
+			     int st_type);
 int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
 				unsigned long **poffsets, size_t *pcnt);
 
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index cd051d3901a9..ece260cf2c0b 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -249,7 +249,7 @@ static void __test_link_api(struct child *child)
 	int link_extra_fd = -1;
 	int err;
 
-	err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets);
+	err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets, STT_FUNC);
 	if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
 		return;
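For context, a minimal sketch of how a caller inside libbpf or the selftests
might use the extended internal API to resolve data-symbol (STT_OBJECT)
offsets, e.g. for uprobe semaphores. The binary path and symbol names below
are illustrative only, not part of the patch:

  #include <elf.h>                /* STT_FUNC, STT_OBJECT */
  #include <stdlib.h>
  #include "libbpf_internal.h"    /* internal elf_resolve_syms_offsets() */

  static int resolve_sema_offsets(void)
  {
          /* hypothetical symbol names, for illustration only */
          const char *syms[] = { "my_sema_1", "my_sema_2" };
          unsigned long *offsets = NULL;
          int err;

          /* same call as before, plus the new st_type argument */
          err = elf_resolve_syms_offsets("/proc/self/exe", 2, syms, &offsets, STT_OBJECT);
          if (err)
                  return err;

          /* offsets[] now holds the resolved offsets; caller must free it */
          free(offsets);
          return 0;
  }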
From patchwork Thu Nov 9 09:28:34 2023
From: Jiri Olsa
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: Song Liu, bpf@vger.kernel.org, Martin KaFai Lau, Song Liu, Yonghong Song,
    John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Yafang Shao
Subject: [PATCHv2 bpf-next 2/6] bpf: Store ref_ctr_offsets values in bpf_uprobe array
Date: Thu, 9 Nov 2023 10:28:34 +0100
Message-ID: <20231109092838.721233-3-jolsa@kernel.org>

We will need to return the ref_ctr_offsets values through the link_info
interface in a following change, so we need to keep them around.

Store the ref_ctr_offsets values directly in the bpf_uprobe array.

Acked-by: Song Liu
Signed-off-by: Jiri Olsa
Acked-by: Andrii Nakryiko
---
 kernel/trace/bpf_trace.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d525a22b8d56..52c1ec3a0467 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3035,6 +3035,7 @@ struct bpf_uprobe_multi_link;
 struct bpf_uprobe {
 	struct bpf_uprobe_multi_link *link;
 	loff_t offset;
+	unsigned long ref_ctr_offset;
 	u64 cookie;
 	struct uprobe_consumer consumer;
 };
@@ -3174,7 +3175,6 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 {
 	struct bpf_uprobe_multi_link *link = NULL;
 	unsigned long __user *uref_ctr_offsets;
-	unsigned long *ref_ctr_offsets = NULL;
 	struct bpf_link_primer link_primer;
 	struct bpf_uprobe *uprobes = NULL;
 	struct task_struct *task = NULL;
@@ -3247,18 +3247,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 	if (!uprobes || !link)
 		goto error_free;
 
-	if (uref_ctr_offsets) {
-		ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
-		if (!ref_ctr_offsets)
-			goto error_free;
-	}
-
 	for (i = 0; i < cnt; i++) {
 		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
 			err = -EFAULT;
 			goto error_free;
 		}
-		if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
+		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
 			err = -EFAULT;
 			goto error_free;
 		}
@@ -3289,7 +3283,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 	for (i = 0; i < cnt; i++) {
 		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
 					     uprobes[i].offset,
-					     ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
+					     uprobes[i].ref_ctr_offset,
 					     &uprobes[i].consumer);
 		if (err) {
 			bpf_uprobe_unregister(&path, uprobes, i);
@@ -3301,11 +3295,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 	if (err)
 		goto error_free;
 
-	kvfree(ref_ctr_offsets);
 	return bpf_link_settle(&link_primer);
 
 error_free:
-	kvfree(ref_ctr_offsets);
 	kvfree(uprobes);
 	kfree(link);
 	if (task)

From patchwork Thu Nov 9 09:28:35 2023
From: Jiri Olsa
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf@vger.kernel.org, Martin KaFai Lau, Song Liu, Yonghong Song,
    John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Yafang Shao
Subject: [PATCHv2 bpf-next 3/6] bpf: Add link_info support for uprobe multi link
Date: Thu, 9 Nov 2023 10:28:35 +0100
Message-ID: <20231109092838.721233-4-jolsa@kernel.org>

Adding support to get uprobe_multi link details through the bpf_link_info
interface, by adding a new struct uprobe_multi to struct bpf_link_info
to carry them.

The uprobe_multi.count value is passed from user space to denote the size
of the array fields (offsets/ref_ctr_offsets/cookies). The actual array
size is stored back to uprobe_multi.count (allowing the user to find out
the actual array size) and the array fields are populated up to the
user-passed size.

All the non-array fields (path/count/flags/pid) are always set.
Signed-off-by: Jiri Olsa
---
 include/uapi/linux/bpf.h       | 10 +++++
 kernel/trace/bpf_trace.c       | 69 ++++++++++++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h | 10 +++++
 3 files changed, 89 insertions(+)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0f6cdf52b1da..05b355da4508 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6556,6 +6556,16 @@ struct bpf_link_info {
 			__u32 flags;
 			__u64 missed;
 		} kprobe_multi;
+		struct {
+			__aligned_u64 path;
+			__aligned_u64 offsets;
+			__aligned_u64 ref_ctr_offsets;
+			__aligned_u64 cookies;
+			__u32 path_size;
+			__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
+			__u32 flags;
+			__u32 pid;
+		} uprobe_multi;
 		struct {
 			__u32 type; /* enum bpf_perf_event_type */
 			__u32 :32;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 52c1ec3a0467..1ea54f3b3f73 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3046,6 +3046,7 @@ struct bpf_uprobe_multi_link {
 	u32 cnt;
 	struct bpf_uprobe *uprobes;
 	struct task_struct *task;
+	u32 flags;
 };
 
 struct bpf_uprobe_multi_run_ctx {
@@ -3085,9 +3086,76 @@ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
 	kfree(umulti_link);
 }
 
+static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
+						struct bpf_link_info *info)
+{
+	u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
+	u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
+	u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
+	u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
+	u32 upath_size = info->uprobe_multi.path_size;
+	struct bpf_uprobe_multi_link *umulti_link;
+	u32 ucount = info->uprobe_multi.count;
+	int err = 0, i;
+	long left;
+
+	if (!upath ^ !upath_size)
+		return -EINVAL;
+
+	if (!uoffsets ^ !ucount)
+		return -EINVAL;
+
+	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+	info->uprobe_multi.count = umulti_link->cnt;
+	info->uprobe_multi.flags = umulti_link->flags;
+	info->uprobe_multi.pid = umulti_link->task ?
+				 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
+
+	if (upath) {
+		char *p, *buf;
+
+		upath_size = min_t(u32, upath_size, PATH_MAX);
+
+		buf = kmalloc(upath_size, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		p = d_path(&umulti_link->path, buf, upath_size);
+		if (IS_ERR(p)) {
+			kfree(buf);
+			return -ENOSPC;
+		}
+		left = copy_to_user(upath, p, buf + upath_size - p);
+		kfree(buf);
+		if (left)
+			return -EFAULT;
+	}
+
+	if (!uoffsets)
+		return 0;
+
+	if (ucount < umulti_link->cnt)
+		err = -ENOSPC;
+	else
+		ucount = umulti_link->cnt;
+
+	for (i = 0; i < ucount; i++) {
+		if (put_user(umulti_link->uprobes[i].offset, uoffsets + i))
+			return -EFAULT;
+		if (uref_ctr_offsets &&
+		    put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
+			return -EFAULT;
+		if (ucookies &&
+		    put_user(umulti_link->uprobes[i].cookie, ucookies + i))
+			return -EFAULT;
+	}
+
+	return err;
+}
+
 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
 	.release = bpf_uprobe_multi_link_release,
 	.dealloc = bpf_uprobe_multi_link_dealloc,
+	.fill_link_info = bpf_uprobe_multi_link_fill_link_info,
 };
 
 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
@@ -3276,6 +3344,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 	link->uprobes = uprobes;
 	link->path = path;
 	link->task = task;
+	link->flags = flags;
 
 	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
 		      &bpf_uprobe_multi_link_lops, prog);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0f6cdf52b1da..05b355da4508 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -6556,6 +6556,16 @@ struct bpf_link_info {
 			__u32 flags;
 			__u64 missed;
 		} kprobe_multi;
+		struct {
+			__aligned_u64 path;
+			__aligned_u64 offsets;
+			__aligned_u64 ref_ctr_offsets;
+			__aligned_u64 cookies;
+			__u32 path_size;
+			__u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
+			__u32 flags;
+			__u32 pid;
+		} uprobe_multi;
 		struct {
 			__u32 type; /* enum bpf_perf_event_type */
 			__u32 :32;
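A rough user-space sketch (not part of the patch) of the in/out contract
described in the changelog above: query the link once to learn the path and
the real count, then query again with arrays of that size. Error handling is
trimmed and the helper name is made up:

  #include <linux/limits.h>       /* PATH_MAX */
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <bpf/bpf.h>            /* bpf_link_get_info_by_fd() */

  static int dump_uprobe_multi(int link_fd)
  {
          struct bpf_link_info info;
          __u32 len = sizeof(info), i, cnt;
          char path[PATH_MAX];
          __u64 *offs;
          int err;

          /* first pass: path is filled in, count is set to the real array size */
          memset(&info, 0, sizeof(info));
          info.uprobe_multi.path = (__u64)(uintptr_t)path;
          info.uprobe_multi.path_size = sizeof(path);
          err = bpf_link_get_info_by_fd(link_fd, &info, &len);
          if (err)
                  return err;

          cnt = info.uprobe_multi.count;
          offs = calloc(cnt, sizeof(*offs));
          if (!offs)
                  return -1;

          /* second pass: arrays are populated up to the count we pass in */
          info.uprobe_multi.offsets = (__u64)(uintptr_t)offs;
          info.uprobe_multi.count = cnt;
          err = bpf_link_get_info_by_fd(link_fd, &info, &len);
          if (!err) {
                  for (i = 0; i < cnt; i++)
                          printf("%s offset 0x%llx\n", path,
                                 (unsigned long long)offs[i]);
          }
          free(offs);
          return err;
  }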
From patchwork Thu Nov 9 09:28:36 2023
From: Jiri Olsa
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: bpf@vger.kernel.org, Martin KaFai Lau, Song Liu, Yonghong Song,
    John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Yafang Shao
Subject: [PATCHv2 bpf-next 4/6] selftests/bpf: Use bpf_link__destroy in fill_link_info tests
Date: Thu, 9 Nov 2023 10:28:36 +0100
Message-ID: <20231109092838.721233-5-jolsa@kernel.org>

The fill_link_info test keeps the skeleton open and just creates
various links. We are wrongly calling bpf_link__detach after each
test to close them; we need to call bpf_link__destroy.

Signed-off-by: Jiri Olsa
Acked-by: Yafang Shao
---
 .../selftests/bpf/prog_tests/fill_link_info.c | 44 ++++++++++---------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index 97142a4db374..9294cb8d7743 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -140,14 +140,14 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
 		.retprobe = type == BPF_PERF_EVENT_KRETPROBE,
 	);
 	ssize_t entry_offset = 0;
+	struct bpf_link *link;
 	int link_fd, err;
 
-	skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
-								 KPROBE_FUNC, &opts);
-	if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
+	link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_kprobe"))
 		return;
 
-	link_fd = bpf_link__fd(skel->links.kprobe_run);
+	link_fd = bpf_link__fd(link);
 	if (!invalid) {
 		/* See also arch_adjust_kprobe_addr(). */
 		if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
@@ -157,39 +157,41 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
 	} else {
 		kprobe_fill_invalid_user_buffer(link_fd);
 	}
-	bpf_link__detach(skel->links.kprobe_run);
+	bpf_link__destroy(link);
 }
 
 static void test_tp_fill_link_info(struct test_fill_link_info *skel)
 {
+	struct bpf_link *link;
 	int link_fd, err;
 
-	skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
-	if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
+	link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
+	if (!ASSERT_OK_PTR(link, "attach_tp"))
 		return;
 
-	link_fd = bpf_link__fd(skel->links.tp_run);
+	link_fd = bpf_link__fd(link);
 	err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
 	ASSERT_OK(err, "verify_perf_link_info");
-	bpf_link__detach(skel->links.tp_run);
+	bpf_link__destroy(link);
 }
 
 static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
 				       enum bpf_perf_event_type type)
 {
+	struct bpf_link *link;
 	int link_fd, err;
 
-	skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run,
-							    type == BPF_PERF_EVENT_URETPROBE,
-							    0, /* self pid */
-							    UPROBE_FILE, uprobe_offset);
-	if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
+	link = bpf_program__attach_uprobe(skel->progs.uprobe_run,
+					  type == BPF_PERF_EVENT_URETPROBE,
+					  0, /* self pid */
+					  UPROBE_FILE, uprobe_offset);
+	if (!ASSERT_OK_PTR(link, "attach_uprobe"))
 		return;
 
-	link_fd = bpf_link__fd(skel->links.uprobe_run);
+	link_fd = bpf_link__fd(link);
 	err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
 	ASSERT_OK(err, "verify_perf_link_info");
-	bpf_link__detach(skel->links.uprobe_run);
+	bpf_link__destroy(link);
 }
 
 static int verify_kmulti_link_info(int fd, bool retprobe)
@@ -278,24 +280,24 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
 					      bool retprobe, bool invalid)
 {
 	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+	struct bpf_link *link;
 	int link_fd, err;
 
 	opts.syms = kmulti_syms;
 	opts.cnt = KMULTI_CNT;
 	opts.retprobe = retprobe;
-	skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
-									NULL, &opts);
-	if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
 		return;
 
-	link_fd = bpf_link__fd(skel->links.kmulti_run);
+	link_fd = bpf_link__fd(link);
 	if (!invalid) {
 		err = verify_kmulti_link_info(link_fd, retprobe);
 		ASSERT_OK(err, "verify_kmulti_link_info");
 	} else {
 		verify_kmulti_invalid_user_buffer(link_fd);
 	}
-	bpf_link__detach(skel->links.kmulti_run);
+	bpf_link__destroy(link);
 }
 
 void test_fill_link_info(void)
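The distinction the fix relies on, summarized (my note, not part of the
patch): bpf_link__detach() only forces the kernel link to detach and leaves
the libbpf struct and its FD allocated, while bpf_link__destroy() closes the
FD and frees the struct, which is what a test that keeps its skeleton open
across many subtests actually needs. The resulting per-subtest pattern,
roughly:

  /* sketch of the pattern used above; skel, TP_CAT and TP_NAME come
   * from the test, error handling trimmed
   */
  struct bpf_link *link;
  int link_fd;

  link = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
  if (!link)
          return;

  link_fd = bpf_link__fd(link);
  /* ... inspect link_fd via bpf_link_get_info_by_fd() ... */

  bpf_link__destroy(link);        /* closes the FD and frees the link */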
From patchwork Thu Nov 9 09:28:37 2023
From: Jiri Olsa
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: Song Liu, bpf@vger.kernel.org, Martin KaFai Lau, Song Liu, Yonghong Song,
    John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo, Yafang Shao
Subject: [PATCHv2 bpf-next 5/6] selftests/bpf: Add link_info test for uprobe_multi link
Date: Thu, 9 Nov 2023 10:28:37 +0100
Message-ID: <20231109092838.721233-6-jolsa@kernel.org>

Adding fill_link_info test for uprobe_multi link.

Setting up uprobes with bogus ref_ctr_offsets and cookie values
to test all the bpf_link_info::uprobe_multi fields.

Acked-by: Song Liu
Signed-off-by: Jiri Olsa
---
 .../selftests/bpf/prog_tests/fill_link_info.c | 179 ++++++++++++++++++
 .../selftests/bpf/progs/test_fill_link_info.c |   6 +
 2 files changed, 185 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
index 9294cb8d7743..86652622afc2 100644
--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
@@ -7,6 +7,7 @@
 #include <test_progs.h>
 #include "trace_helpers.h"
 #include "test_fill_link_info.skel.h"
+#include "bpf/libbpf_internal.h"
 
 #define TP_CAT "sched"
 #define TP_NAME "sched_switch"
@@ -300,6 +301,177 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
 	bpf_link__destroy(link);
 }
 
+/* Initialize semaphore variables so they don't end up in bss
+ * section and we could get retrieve their offsets.
+ */
+static short uprobe_link_info_sema_1 = 1;
+static short uprobe_link_info_sema_2 = 1;
+static short uprobe_link_info_sema_3 = 1;
+
+noinline void uprobe_link_info_func_1(void)
+{
+	uprobe_link_info_sema_1++;
+	asm volatile ("");
+}
+
+noinline void uprobe_link_info_func_2(void)
+{
+	uprobe_link_info_sema_2++;
+	asm volatile ("");
+}
+
+noinline void uprobe_link_info_func_3(void)
+{
+	uprobe_link_info_sema_3++;
+	asm volatile ("");
+}
+
+static int
+verify_umulti_link_info(int fd, bool retprobe, __u64 *offsets,
+			__u64 *cookies, __u64 *ref_ctr_offsets)
+{
+	char path[PATH_MAX], path_buf[PATH_MAX];
+	struct bpf_link_info info;
+	__u32 len = sizeof(info);
+	__u64 ref_ctr_offsets_buf[3];
+	__u64 offsets_buf[3];
+	__u64 cookies_buf[3];
+	int i, err;
+
+	memset(path, 0, sizeof(path));
+	err = readlink("/proc/self/exe", path, sizeof(path));
+	if (!ASSERT_NEQ(err, -1, "readlink"))
+		return -1;
+
+	memset(&info, 0, sizeof(info));
+	info.uprobe_multi.path = ptr_to_u64(path_buf);
+	info.uprobe_multi.path_size = sizeof(path_buf);
+
+again:
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
+		return -1;
+
+	if (!ASSERT_EQ(info.type, BPF_LINK_TYPE_UPROBE_MULTI, "info.type"))
+		return -1;
+
+	ASSERT_EQ(info.uprobe_multi.pid, getpid(), "info.uprobe_multi.pid");
+	ASSERT_EQ(info.uprobe_multi.count, 3, "info.uprobe_multi.count");
+	ASSERT_EQ(info.uprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN,
+		  retprobe, "info.uprobe_multi.flags.retprobe");
+	ASSERT_STREQ(path_buf, path, "info.uprobe_multi.path");
+
+	if (!info.uprobe_multi.offsets) {
+		info.uprobe_multi.offsets = ptr_to_u64(offsets_buf);
+		info.uprobe_multi.cookies = ptr_to_u64(cookies_buf);
+		info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets_buf);
+		goto again;
+	}
+
+	for (i = 0; i < info.uprobe_multi.count; i++) {
+		ASSERT_EQ(offsets_buf[i], offsets[i], "info.uprobe_multi.offsets");
+		ASSERT_EQ(cookies_buf[i], cookies[i], "info.uprobe_multi.cookies");
+		ASSERT_EQ(ref_ctr_offsets_buf[i], ref_ctr_offsets[i], "info.uprobe_multi.ref_ctr_offsets");
+	}
+	return 0;
+}
+
+static void verify_umulti_invalid_user_buffer(int fd)
+{
+	struct bpf_link_info info;
+	__u32 len = sizeof(info);
+	__u64 offsets[3];
+	int err;
+
+	/* upath_size defined, not path */
+	memset(&info, 0, sizeof(info));
+	info.uprobe_multi.path_size = 3;
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	ASSERT_EQ(err, -EINVAL, "failed_upath_size");
+
+	/* path has wrong pointer */
+	memset(&info, 0, sizeof(info));
+	info.uprobe_multi.path_size = PATH_MAX;
+	info.uprobe_multi.path = 123;
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	ASSERT_EQ(err, -EFAULT, "failed_bad_path_ptr");
+
+	/* count defined, not offsets */
+	memset(&info, 0, sizeof(info));
+	info.uprobe_multi.count = 3;
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	ASSERT_EQ(err, -EINVAL, "failed_count");
+
+	/* offsets not big enough */
+	memset(&info, 0, sizeof(info));
+	info.uprobe_multi.offsets = ptr_to_u64(offsets);
+	info.uprobe_multi.count = 2;
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	ASSERT_EQ(err, -ENOSPC, "failed_small_count");
+
+	/* offsets has wrong pointer */
+	memset(&info, 0, sizeof(info));
+	info.uprobe_multi.offsets = 123;
+	info.uprobe_multi.count = 3;
+	err = bpf_link_get_info_by_fd(fd, &info, &len);
+	ASSERT_EQ(err, -EFAULT, "failed_wrong_offsets");
+}
+
+static void test_uprobe_multi_fill_link_info(struct test_fill_link_info *skel,
+					     bool retprobe, bool invalid)
+{
+	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
+		.retprobe = retprobe,
+	);
+	const char *syms[3] = {
+		"uprobe_link_info_func_1",
+		"uprobe_link_info_func_2",
+		"uprobe_link_info_func_3",
+	};
+	__u64 cookies[3] = {
+		0xdead,
+		0xbeef,
+		0xcafe,
+	};
+	const char *sema[3] = {
+		"uprobe_link_info_sema_1",
+		"uprobe_link_info_sema_2",
+		"uprobe_link_info_sema_3",
+	};
+	__u64 *offsets, *ref_ctr_offsets;
+	int link_fd, err;
+
+	err = elf_resolve_syms_offsets("/proc/self/exe", 3, sema,
+				       (unsigned long **) &ref_ctr_offsets, STT_OBJECT);
+	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object"))
+		return;
+
+	err = elf_resolve_syms_offsets("/proc/self/exe", 3, syms,
+				       (unsigned long **) &offsets, STT_FUNC);
+	if (!ASSERT_OK(err, "elf_resolve_syms_offsets_func"))
+		return;
+
+	opts.syms = syms;
+	opts.cookies = &cookies[0];
+	opts.ref_ctr_offsets = (unsigned long *) &ref_ctr_offsets[0];
+	opts.cnt = ARRAY_SIZE(syms);
+
+	skel->links.umulti_run = bpf_program__attach_uprobe_multi(skel->progs.umulti_run, 0,
+								  "/proc/self/exe", NULL, &opts);
+	if (!ASSERT_OK_PTR(skel->links.umulti_run, "bpf_program__attach_uprobe_multi"))
+		goto out;
+
+	link_fd = bpf_link__fd(skel->links.umulti_run);
+	if (invalid)
+		verify_umulti_invalid_user_buffer(link_fd);
+	else
+		verify_umulti_link_info(link_fd, retprobe, offsets, cookies, ref_ctr_offsets);
+
+	bpf_link__detach(skel->links.umulti_run);
+out:
+	free(offsets);
+}
+
 void test_fill_link_info(void)
 {
 	struct test_fill_link_info *skel;
@@ -339,6 +511,13 @@ void test_fill_link_info(void)
 	if (test__start_subtest("kprobe_multi_invalid_ubuff"))
 		test_kprobe_multi_fill_link_info(skel, true, true);
 
+	if (test__start_subtest("uprobe_multi_link_info"))
+		test_uprobe_multi_fill_link_info(skel, false, false);
+	if (test__start_subtest("uretprobe_multi_link_info"))
+		test_uprobe_multi_fill_link_info(skel, true, false);
+	if (test__start_subtest("uprobe_multi_invalid"))
+		test_uprobe_multi_fill_link_info(skel, false, true);
+
 cleanup:
 	test_fill_link_info__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
index 564f402d56fe..69509f8bb680 100644
--- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c
+++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c
@@ -39,4 +39,10 @@ int BPF_PROG(kmulti_run)
 	return 0;
 }
 
+SEC("uprobe.multi")
+int BPF_PROG(umulti_run)
+{
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";
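For reference, the attach side that produces the link this test inspects
looks roughly like the fragment below (a sketch of the libbpf
bpf_uprobe_multi_opts usage; skel and the resolved ref_ctr_offsets array
come from the surrounding test, values mirror the ones above):

  LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
  const char *syms[] = {
          "uprobe_link_info_func_1",
          "uprobe_link_info_func_2",
          "uprobe_link_info_func_3",
  };
  __u64 cookies[] = { 0xdead, 0xbeef, 0xcafe };
  struct bpf_link *link;

  opts.syms = syms;
  opts.cookies = cookies;
  opts.ref_ctr_offsets = ref_ctr_offsets;  /* resolved via elf_resolve_syms_offsets() */
  opts.cnt = 3;

  link = bpf_program__attach_uprobe_multi(skel->progs.umulti_run, 0 /* self pid */,
                                          "/proc/self/exe", NULL, &opts);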
From patchwork Thu Nov 9 09:28:38 2023
From: Jiri Olsa
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
Cc: Song Liu, Quentin Monnet, bpf@vger.kernel.org, Martin KaFai Lau, Song Liu,
    Yonghong Song, John Fastabend, KP Singh, Stanislav Fomichev, Hao Luo,
    Yafang Shao
Subject: [PATCHv2 bpf-next 6/6] bpftool: Add support to display uprobe_multi links
Date: Thu, 9 Nov 2023 10:28:38 +0100
Message-ID: <20231109092838.721233-7-jolsa@kernel.org>

Adding support to display details for uprobe_multi links,
both plain:

  # bpftool link
  ...
  24: uprobe_multi  prog 126
          uprobe.multi  path /home/jolsa/bpf/test_progs  func_cnt 3  pid 4143
          offset           ref_ctr_offset   cookies
          0xd1f88          0xf5d5a8         0xdead
          0xd1f8f          0xf5d5aa         0xbeef
          0xd1f96          0xf5d5ac         0xcafe

and json:

  # bpftool link -p
  [{
        ...
    },{
        "id": 24,
        "type": "uprobe_multi",
        "prog_id": 126,
        "retprobe": false,
        "path": "/home/jolsa/bpf/test_progs",
        "func_cnt": 3,
        "pid": 4143,
        "funcs": [{
                "offset": 860040,
                "ref_ctr_offset": 16111016,
                "cookie": 57005
            },{
                "offset": 860047,
                "ref_ctr_offset": 16111018,
                "cookie": 48879
            },{
                "offset": 860054,
                "ref_ctr_offset": 16111020,
                "cookie": 51966
            }
        ]
    }
  ]

Acked-by: Song Liu
Reviewed-by: Quentin Monnet
Signed-off-by: Jiri Olsa
---
 tools/bpf/bpftool/link.c | 105 ++++++++++++++++++++++++++++++++++-
 1 file changed, 103 insertions(+), 2 deletions(-)

diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index a1528cde81ab..d198adf77d81 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -294,6 +294,37 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
 	jsonw_end_array(json_wtr);
 }
 
+static __u64 *u64_to_arr(__u64 val)
+{
+	return (__u64 *) u64_to_ptr(val);
+}
+
+static void
+show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+	__u32 i;
+
+	jsonw_bool_field(json_wtr, "retprobe",
+			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
+	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
+	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
+	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
+	jsonw_name(json_wtr, "funcs");
+	jsonw_start_array(json_wtr);
+
+	for (i = 0; i < info->uprobe_multi.count; i++) {
+		jsonw_start_object(json_wtr);
+		jsonw_uint_field(json_wtr, "offset",
+				 u64_to_arr(info->uprobe_multi.offsets)[i]);
+		jsonw_uint_field(json_wtr, "ref_ctr_offset",
+				 u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]);
+		jsonw_uint_field(json_wtr, "cookie",
+				 u64_to_arr(info->uprobe_multi.cookies)[i]);
+		jsonw_end_object(json_wtr);
+	}
+	jsonw_end_array(json_wtr);
+}
+
 static void
 show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
 {
@@ -465,6 +496,9 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
 	case BPF_LINK_TYPE_KPROBE_MULTI:
 		show_kprobe_multi_json(info, json_wtr);
 		break;
+	case BPF_LINK_TYPE_UPROBE_MULTI:
+		show_uprobe_multi_json(info, json_wtr);
+		break;
 	case BPF_LINK_TYPE_PERF_EVENT:
 		switch (info->perf_event.type) {
 		case BPF_PERF_EVENT_EVENT:
@@ -674,6 +708,33 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
 	}
 }
 
+static void show_uprobe_multi_plain(struct bpf_link_info *info)
+{
+	__u32 i;
+
+	if (!info->uprobe_multi.count)
+		return;
+
+	if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
+		printf("\n\turetprobe.multi ");
+	else
+		printf("\n\tuprobe.multi ");
+
+	printf("path %s ", (char *) u64_to_ptr(info->uprobe_multi.path));
+	printf("func_cnt %u ", info->uprobe_multi.count);
+
+	if (info->uprobe_multi.pid != (__u32) -1)
+		printf("pid %d ", info->uprobe_multi.pid);
+
+	printf("\n\t%-16s %-16s %-16s", "offset", "ref_ctr_offset", "cookies");
+	for (i = 0; i < info->uprobe_multi.count; i++) {
+		printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
+		       u64_to_arr(info->uprobe_multi.offsets)[i],
+		       u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
+		       u64_to_arr(info->uprobe_multi.cookies)[i]);
+	}
+}
+
 static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
 {
 	const char *buf;
@@ -807,6 +868,9 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
 	case BPF_LINK_TYPE_KPROBE_MULTI:
 		show_kprobe_multi_plain(info);
 		break;
+	case BPF_LINK_TYPE_UPROBE_MULTI:
+		show_uprobe_multi_plain(info);
+		break;
 	case BPF_LINK_TYPE_PERF_EVENT:
 		switch (info->perf_event.type) {
 		case BPF_PERF_EVENT_EVENT:
@@ -846,8 +910,10 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
 
 static int do_show_link(int fd)
 {
+	__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
 	struct bpf_link_info info;
 	__u32 len = sizeof(info);
+	char path_buf[PATH_MAX];
 	__u64 *addrs = NULL;
 	char buf[PATH_MAX];
 	int count;
@@ -889,6 +955,39 @@ static int do_show_link(int fd)
 			goto again;
 		}
 	}
+	if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
+	    !info.uprobe_multi.offsets) {
+		count = info.uprobe_multi.count;
+		if (count) {
+			offsets = calloc(count, sizeof(__u64));
+			if (!offsets) {
+				p_err("mem alloc failed");
+				close(fd);
+				return -ENOMEM;
+			}
+			info.uprobe_multi.offsets = ptr_to_u64(offsets);
+			ref_ctr_offsets = calloc(count, sizeof(__u64));
+			if (!ref_ctr_offsets) {
+				p_err("mem alloc failed");
+				free(offsets);
+				close(fd);
+				return -ENOMEM;
+			}
+			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
+			cookies = calloc(count, sizeof(__u64));
+			if (!cookies) {
+				p_err("mem alloc failed");
+				free(cookies);
+				free(offsets);
+				close(fd);
+				return -ENOMEM;
+			}
+			info.uprobe_multi.cookies = ptr_to_u64(cookies);
+			info.uprobe_multi.path = ptr_to_u64(path_buf);
+			info.uprobe_multi.path_size = sizeof(path_buf);
+			goto again;
+		}
+	}
 	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
 		switch (info.perf_event.type) {
 		case BPF_PERF_EVENT_TRACEPOINT:
@@ -924,8 +1023,10 @@ static int do_show_link(int fd)
 	else
 		show_link_close_plain(fd, &info);
 
-	if (addrs)
-		free(addrs);
+	free(ref_ctr_offsets);
+	free(cookies);
+	free(offsets);
+	free(addrs);
 	close(fd);
 	return 0;
 }