@@ -99,6 +99,7 @@ config RISCV
select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
+ select FUNCTION_ALIGNMENT_8B if DYNAMIC_FTRACE_WITH_CALL_OPS
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -152,6 +153,7 @@ config RISCV
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
select FUNCTION_ALIGNMENT_4B if HAVE_DYNAMIC_FTRACE && RISCV_ISA_C
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG)
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_GRAPH_FUNC
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
@@ -15,9 +15,9 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
LDFLAGS_vmlinux += --no-relax
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
+ifeq ($(CONFIG_RISCV_ISA_C),y)
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=8,4
+else
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
+endif
+else
ifeq ($(CONFIG_RISCV_ISA_C),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=4
else
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
+endif
endif
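
For orientation, the per-function form of the same compiler option shows what these flags request. This is a sketch only: the function name is invented, and the kernel applies the option globally through CC_FLAGS_FTRACE rather than per function. Assuming RV64 without the C extension, -fpatchable-function-entry=4,2 emits four 4-byte nops, two of them before the entry symbol:

/* Illustration only; not part of the patch. */
__attribute__((patchable_function_entry(4, 2)))
void traced_function(void)
{
	/*
	 * Layout once ftrace has initialised the call site:
	 *
	 *   traced_function - 8:  8-byte slot, later holds a struct ftrace_ops *
	 *   traced_function + 0:  auipc t0, ...        (always kept after init)
	 *   traced_function + 4:  nop4 or jalr t0      (tracing off / on)
	 */
}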
@@ -493,6 +493,9 @@ void asm_offsets(void)
DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
OFFSET(STACKFRAME_FP, stackframe, fp);
OFFSET(STACKFRAME_RA, stackframe, ra);
+#ifdef CONFIG_FUNCTION_TRACER
+ DEFINE(FTRACE_OPS_FUNC, offsetof(struct ftrace_ops, func));
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
DEFINE(FREGS_SIZE_ON_STACK, ALIGN(sizeof(struct __arch_ftrace_regs), STACK_ALIGN));
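
As usual, asm-offsets.c is the bridge between C structure layout and assembly: the DEFINE() above is scraped by Kbuild into include/generated/asm-offsets.h as a plain constant, which is what later lets mcount-dyn.S load op->func without hardcoding the layout of struct ftrace_ops. A rough sketch of the generated result (the numeric value is illustrative, not taken from the patch):

/* include/generated/asm-offsets.h (sketch) */
#define FTRACE_OPS_FUNC 0	/* offsetof(struct ftrace_ops, func) */

/* consumed below in mcount-dyn.S as:  REG_L ra, FTRACE_OPS_FUNC(a2) */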
@@ -15,6 +15,9 @@
unsigned long ftrace_call_adjust(unsigned long addr)
{
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return addr + 8 + MCOUNT_AUIPC_SIZE;
+
return addr + MCOUNT_AUIPC_SIZE;
}
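
With CALL_OPS the address recorded in mcount_loc points at the first pre-entry nop, so ftrace_call_adjust() skips the 8-byte ops literal plus the auipc and rec->ip lands on the jalr, exactly as in the !CALL_OPS case. A sketch of the arithmetic for one call site (assuming RV64 and the 8-byte function alignment selected above; these macro names are illustrative and do not exist in the patch):

/* Illustration only: address relationships around an 8-byte-aligned entry. */
#define OPS_LITERAL(entry)	((entry) - 8)	/* struct ftrace_ops * slot          */
#define AUIPC_ADDR(entry)	((entry) + 0)	/* auipc t0, points at ftrace_caller */
#define JALR_ADDR(entry)	((entry) + 4)	/* jalr t0 / nop4; this is rec->ip   */
#define T0_AFTER_JALR(entry)	((entry) + 8)	/* so the literal sits at t0 - 16    */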
@@ -64,9 +67,52 @@ static int __ftrace_modify_call(unsigned long source, unsigned long target, bool
return 0;
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+static const struct ftrace_ops *riscv64_rec_get_ops(struct dyn_ftrace *rec)
+{
+ const struct ftrace_ops *ops = NULL;
+
+ if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+ ops = ftrace_find_unique_ops(rec);
+ WARN_ON_ONCE(!ops);
+ }
+
+ if (!ops)
+ ops = &ftrace_list_ops;
+
+ return ops;
+}
+
+static int ftrace_rec_set_ops(const struct dyn_ftrace *rec,
+ const struct ftrace_ops *ops)
+{
+ unsigned long literal = ALIGN_DOWN(rec->ip, 8) - 8;
+
+ return patch_text_nosync((void *)literal, &ops, sizeof(ops));
+}
+
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
+}
+
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
+{
+ return ftrace_rec_set_ops(rec, riscv64_rec_get_ops(rec));
+}
+#else
+static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
+static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
+#endif
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long distance, orig_addr, pc = rec->ip - MCOUNT_AUIPC_SIZE;
+ int ret;
+
+ ret = ftrace_rec_update_ops(rec);
+ if (ret)
+ return ret;
orig_addr = (unsigned long)&ftrace_caller;
distance = addr > orig_addr ? addr - orig_addr : orig_addr - addr;
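
These helpers decide what the per-site literal points to: when exactly one ftrace_ops is attached to a site (FTRACE_FL_CALL_OPS_EN), ftrace_find_unique_ops() returns it and its address is written into the literal so the trampoline can call it directly; otherwise the site falls back to ftrace_list_ops, which iterates over all registered ops. A minimal sketch of a tracer that could be dispatched this way (the my_tracer_* names are invented; the ftrace calls are the standard registration API):

#include <linux/ftrace.h>

/* Illustration only. */
static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/*
	 * With CALL_OPS active, ftrace_caller loaded "op" from the literal
	 * at the call site and jumped here via op->func, without touching
	 * ftrace_call_dest.
	 */
}

static struct ftrace_ops my_tracer_ops = {
	.func = my_tracer_func,
};

/*
 * e.g. from module init:
 *	ftrace_set_filter(&my_tracer_ops, "vfs_read", strlen("vfs_read"), 0);
 *	register_ftrace_function(&my_tracer_ops);
 */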
@@ -79,6 +125,11 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
u32 nop4 = RISCV_INSN_NOP4;
+ int ret;
+
+ ret = ftrace_rec_set_nop_ops(rec);
+ if (ret)
+ return ret;
if (patch_insn_write((void *)rec->ip, &nop4, MCOUNT_NOP4_SIZE))
return -EPERM;
@@ -99,6 +150,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
unsigned int nops[2], offset;
int ret;
+ ret = ftrace_rec_set_nop_ops(rec);
+ if (ret)
+ return ret;
+
offset = (unsigned long) &ftrace_caller - pc;
nops[0] = to_auipc_t0(offset);
nops[1] = RISCV_INSN_NOP4;
@@ -113,6 +168,13 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
ftrace_func_t ftrace_call_dest = ftrace_stub;
int ftrace_update_ftrace_func(ftrace_func_t func)
{
+ /*
+ * When using CALL_OPS, the function to call is associated with the
+ * call site, and we don't have a global function pointer to update.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
+ return 0;
+
WRITE_ONCE(ftrace_call_dest, func);
/*
* The data fence ensure that the update to ftrace_call_dest happens
@@ -138,8 +200,13 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
{
unsigned long caller = rec->ip - MCOUNT_AUIPC_SIZE;
unsigned int call[2];
+ int ret;
make_call_t0(caller, old_addr, call);
+ ret = ftrace_rec_update_ops(rec);
+ if (ret)
+ return ret;
+
return __ftrace_modify_call(caller, addr, true);
}
#endif
@@ -139,10 +139,34 @@
.macro PREPARE_ARGS
addi a0, t0, -MCOUNT_JALR_SIZE // ip (callsite's jalr insn)
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ /*
+ * When CALL_OPS is enabled, 8 bytes of nops (2 or 4 instructions,
+ * depending on RISCV_ISA_C) are placed before the function entry;
+ * they are later overwritten with the pointer to the associated
+ * struct ftrace_ops.
+ *
+ * -8: &ftrace_ops of the associated tracer function.
+ *<ftrace enable>:
+ * 0: auipc t0/ra, 0x?
+ * 4: jalr t0/ra, ?(t0/ra)
+ *
+ * -8: &ftrace_nop_ops
+ *<ftrace disable>:
+ * 0: nop
+ * 4: nop
+ *
+ * After the callsite's jalr executes, t0 holds the address of the
+ * instruction following it (function entry + 8), so the associated
+ * ops pointer is found at t0 - 16.
+ */
+ mv a1, ra // parent_ip
+ REG_L a2, -16(t0) // op
+ REG_L ra, FTRACE_OPS_FUNC(a2) // op->func
+#else
la a1, function_trace_op
- REG_L a2, 0(a1)
- mv a1, ra
- mv a3, sp
+ REG_L a2, 0(a1) // op
+ mv a1, ra // parent_ip
+#endif
+ mv a3, sp // regs
.endm
SYM_FUNC_START(ftrace_caller)
@@ -150,10 +174,13 @@ SYM_FUNC_START(ftrace_caller)
SAVE_ABI_REGS
PREPARE_ARGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+ jalr ra
+#else
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
REG_L ra, ftrace_call_dest
jalr ra, 0(ra)
-
+#endif
RESTORE_ABI_REGS
bnez t1, .Ldirect
jr t0
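
Expressed in C-like terms, the CALL_OPS path through the trampoline amounts to the sketch below. This is only an illustration: register save/restore, the !CALL_OPS branch and the direct-call exit through t1/.Ldirect are omitted, and the helper name is invented.

/* Sketch only: what PREPARE_ARGS plus "jalr ra" do under CALL_OPS. */
static void ftrace_caller_call_ops_sketch(unsigned long t0, unsigned long parent_ip,
					  struct ftrace_regs *fregs)
{
	/* a0: t0 points just past the callsite's jalr, so step back to it */
	unsigned long ip = t0 - MCOUNT_JALR_SIZE;
	/* a2: the per-site ops literal sits 16 bytes before t0 */
	struct ftrace_ops *op = *(struct ftrace_ops **)(t0 - 16);

	/* ra <- op->func, then "jalr ra": no load of ftrace_call_dest */
	op->func(ip, parent_ip, op, fregs);
}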