@@ -1297,9 +1297,8 @@ int uprobe_register_batch(struct inode *inode, int cnt,
return -EINVAL;
}
+ /* pre-allocate new uprobe instances */
for (i = 0; i < cnt; i++) {
- struct uprobe *cur_uprobe;
-
uc = get_uprobe_consumer(i, ctx);
uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
@@ -1316,6 +1315,15 @@ int uprobe_register_batch(struct inode *inode, int cnt,
RB_CLEAR_NODE(&uprobe->rb_node);
atomic64_set(&uprobe->ref, 1);
+ uc->uprobe = uprobe;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ struct uprobe *cur_uprobe;
+
+ uc = get_uprobe_consumer(i, ctx);
+ uprobe = uc->uprobe;
+
/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
/* a uprobe exists for this inode:offset combination */
@@ -1323,15 +1331,12 @@ int uprobe_register_batch(struct inode *inode, int cnt,
if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
ref_ctr_mismatch_warn(cur_uprobe, uprobe);
put_uprobe(cur_uprobe);
- kfree(uprobe);
ret = -EINVAL;
goto cleanup_uprobes;
}
kfree(uprobe);
- uprobe = cur_uprobe;
+ uc->uprobe = cur_uprobe;
}
-
- uc->uprobe = uprobe;
}
for (i = 0; i < cnt; i++) {
Now we are ready to split the alloc-and-insert coupled step into two separate phases. First, we allocate and prepare all potentially-to-be-inserted uprobe instances, assuming the corresponding uprobes are not yet in uprobes_tree. This is needed so that we don't do memory allocations under uprobes_treelock (once we batch locking for each step). Second, we insert new uprobes, or reuse already-existing ones, into uprobes_tree. Any uprobe that turned out to be unnecessary is immediately freed, as there are no other references to it.

This concludes the preparations that make uprobe_register_batch() ready to batch and optimize locking per each phase.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 kernel/events/uprobes.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)