Context |
Check |
Description |
netdev/tree_selection |
success
|
Not a local patch
|
bpf/vmtest-bpf-next-VM_Test-0 |
success
|
Logs for Lint
|
bpf/vmtest-bpf-next-VM_Test-3 |
success
|
Logs for Validate matrix.py
|
bpf/vmtest-bpf-next-VM_Test-1 |
success
|
Logs for ShellCheck
|
bpf/vmtest-bpf-next-VM_Test-2 |
success
|
Logs for Unittests
|
bpf/vmtest-bpf-next-VM_Test-4 |
success
|
Logs for aarch64-gcc / GCC BPF
|
bpf/vmtest-bpf-next-VM_Test-5 |
success
|
Logs for aarch64-gcc / build / build for aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-6 |
success
|
Logs for aarch64-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-10 |
success
|
Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-11 |
success
|
Logs for aarch64-gcc / veristat-kernel
|
bpf/vmtest-bpf-next-VM_Test-12 |
success
|
Logs for aarch64-gcc / veristat-meta
|
bpf/vmtest-bpf-next-VM_Test-13 |
success
|
Logs for s390x-gcc / GCC BPF
|
bpf/vmtest-bpf-next-VM_Test-14 |
success
|
Logs for s390x-gcc / build / build for s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-15 |
success
|
Logs for s390x-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-18 |
success
|
Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-19 |
success
|
Logs for s390x-gcc / veristat-kernel
|
bpf/vmtest-bpf-next-VM_Test-20 |
success
|
Logs for s390x-gcc / veristat-meta
|
bpf/vmtest-bpf-next-VM_Test-21 |
success
|
Logs for set-matrix
|
bpf/vmtest-bpf-next-VM_Test-23 |
success
|
Logs for x86_64-gcc / build / build for x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-24 |
success
|
Logs for x86_64-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-30 |
success
|
Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-34 |
success
|
Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-35 |
success
|
Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
|
bpf/vmtest-bpf-next-VM_Test-39 |
success
|
Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-40 |
success
|
Logs for x86_64-llvm-17 / veristat-kernel
|
bpf/vmtest-bpf-next-VM_Test-41 |
success
|
Logs for x86_64-llvm-17 / veristat-meta
|
bpf/vmtest-bpf-next-VM_Test-43 |
success
|
Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-44 |
success
|
Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
|
bpf/vmtest-bpf-next-VM_Test-49 |
success
|
Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-50 |
success
|
Logs for x86_64-llvm-18 / veristat-kernel
|
bpf/vmtest-bpf-next-VM_Test-51 |
success
|
Logs for x86_64-llvm-18 / veristat-meta
|
bpf/vmtest-bpf-next-PR |
fail
|
PR summary
|
bpf/vmtest-bpf-next-VM_Test-7 |
success
|
Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-8 |
success
|
Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-9 |
success
|
Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-16 |
success
|
Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-17 |
success
|
Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-22 |
success
|
Logs for x86_64-gcc / GCC BPF / GCC BPF
|
bpf/vmtest-bpf-next-VM_Test-25 |
success
|
Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-26 |
fail
|
Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-27 |
fail
|
Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-28 |
success
|
Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-29 |
success
|
Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-31 |
success
|
Logs for x86_64-gcc / veristat-kernel / x86_64-gcc veristat_kernel
|
bpf/vmtest-bpf-next-VM_Test-32 |
success
|
Logs for x86_64-gcc / veristat-meta / x86_64-gcc veristat_meta
|
bpf/vmtest-bpf-next-VM_Test-33 |
success
|
Logs for x86_64-llvm-17 / GCC BPF / GCC BPF
|
bpf/vmtest-bpf-next-VM_Test-36 |
success
|
Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-37 |
fail
|
Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-38 |
fail
|
Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-42 |
success
|
Logs for x86_64-llvm-18 / GCC BPF / GCC BPF
|
bpf/vmtest-bpf-next-VM_Test-45 |
success
|
Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-46 |
fail
|
Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-47 |
fail
|
Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-48 |
fail
|
Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
|
@@ -798,19 +798,134 @@ static __maybe_unused void uprobe_trampoline_put(struct uprobe_trampoline *tramp
destroy_uprobe_trampoline(tramp);
}
+struct mm_uprobe {
+ struct rb_node rb_node;
+ unsigned long auprobe;
+ unsigned long vaddr;
+};
+
+#define __node_2_mm_uprobe(node) rb_entry((node), struct mm_uprobe, rb_node)
+
+struct __mm_uprobe_key {
+ unsigned long auprobe;
+ unsigned long vaddr;
+};
+
+static inline int mm_uprobe_cmp(unsigned long l_auprobe, unsigned long l_vaddr,
+ const struct mm_uprobe *r_mmu)
+{
+ if (l_auprobe < r_mmu->auprobe)
+ return -1;
+ if (l_auprobe > r_mmu->auprobe)
+ return 1;
+ if (l_vaddr < r_mmu->vaddr)
+ return -1;
+ if (l_vaddr > r_mmu->vaddr)
+ return 1;
+
+ return 0;
+}
+
+static inline int __mm_uprobe_cmp(struct rb_node *a, const struct rb_node *b)
+{
+ struct mm_uprobe *mmu_a = __node_2_mm_uprobe(a);
+
+ return mm_uprobe_cmp(mmu_a->auprobe, mmu_a->vaddr, __node_2_mm_uprobe(b));
+}
+
+static inline bool __mm_uprobe_less(struct rb_node *a, const struct rb_node *b)
+{
+ struct mm_uprobe *mmu_a = __node_2_mm_uprobe(a);
+
+ return mm_uprobe_cmp(mmu_a->auprobe, mmu_a->vaddr, __node_2_mm_uprobe(b)) < 0;
+}
+
+static inline int __mm_uprobe_cmp_key(const void *key, const struct rb_node *b)
+{
+ const struct __mm_uprobe_key *a = key;
+
+ return mm_uprobe_cmp(a->auprobe, a->vaddr, __node_2_mm_uprobe(b));
+}
+
+static struct mm_uprobe *find_mm_uprobe(struct mm_struct *mm, struct arch_uprobe *auprobe,
+ unsigned long vaddr)
+{
+ struct __mm_uprobe_key key = {
+ .auprobe = (unsigned long) auprobe,
+ .vaddr = vaddr,
+ };
+ struct rb_node *node;
+
+ node = rb_find(&key, &mm->uprobes_state.root_uprobes, __mm_uprobe_cmp_key);
+ return node ? __node_2_mm_uprobe(node) : NULL;
+}
+
+static struct mm_uprobe *insert_mm_uprobe(struct mm_struct *mm, struct arch_uprobe *auprobe,
+ unsigned long vaddr)
+{
+ struct mm_uprobe *mmu;
+
+ mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
+ if (mmu) {
+ mmu->auprobe = (unsigned long) auprobe;
+ mmu->vaddr = vaddr;
+ RB_CLEAR_NODE(&mmu->rb_node);
+ rb_add(&mmu->rb_node, &mm->uprobes_state.root_uprobes, __mm_uprobe_less);
+ }
+ return mmu;
+}
+
+static void destroy_mm_uprobe(struct mm_uprobe *mmu, struct rb_root *root)
+{
+ rb_erase(&mmu->rb_node, root);
+ kfree(mmu);
+}
+
+int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
+{
+ struct mm_uprobe *mmu;
+
+ if (find_mm_uprobe(mm, auprobe, vaddr))
+ return 0;
+ mmu = insert_mm_uprobe(mm, auprobe, vaddr);
+ if (!mmu)
+ return -ENOMEM;
+ return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN, false);
+}
+
+int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
+{
+ struct mm_uprobe *mmu;
+
+ mmu = find_mm_uprobe(mm, auprobe, vaddr);
+ if (!mmu)
+ return 0;
+ destroy_mm_uprobe(mmu, &mm->uprobes_state.root_uprobes);
+ return uprobe_write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn, true);
+}
+
void arch_uprobe_init_state(struct mm_struct *mm)
{
INIT_HLIST_HEAD(&mm->uprobes_state.head_tramps);
+ mm->uprobes_state.root_uprobes = RB_ROOT;
}
void arch_uprobe_clear_state(struct mm_struct *mm)
{
struct uprobes_state *state = &mm->uprobes_state;
struct uprobe_trampoline *tramp;
+ struct rb_node *node, *next;
struct hlist_node *n;
hlist_for_each_entry_safe(tramp, n, &state->head_tramps, node)
destroy_uprobe_trampoline(tramp);
+
+ node = rb_first(&state->root_uprobes);
+ while (node) {
+ next = rb_next(node);
+ destroy_mm_uprobe(__node_2_mm_uprobe(node), &state->root_uprobes);
+ node = next;
+ }
}
#else /* 32-bit: */
/*
@@ -186,6 +186,7 @@ struct uprobes_state {
struct xol_area *xol_area;
#ifdef CONFIG_X86_64
struct hlist_head head_tramps;
+ struct rb_root root_uprobes;
#endif
};
We keep track of global uprobe instances, because with just 2 types of update - writing breakpoint or original opcode - we don't need to track the state of the specific uprobe for each mm_struct. With optimized uprobe support we will need to make several instruction updates and make sure we keep the state of the update per mm_struct. Adding the mm_uprobe object to keep track of installed uprobes per mm_struct. It's kept in rb_tree for fast lookups and the tree is cleaned up when the breakpoint is uninstalled or the mm_struct is released. The key is the uprobe object's address together with the virtual address of the breakpoint. The reason for adding the latter to the key is that we can have multiple virtual addresses for a single uprobe, because the code (for given offset) can be loaded multiple times. Signed-off-by: Jiri Olsa <jolsa@kernel.org> --- arch/x86/kernel/uprobes.c | 115 ++++++++++++++++++++++++++++++++++++++ include/linux/uprobes.h | 1 + 2 files changed, 116 insertions(+)