@@ -2283,6 +2283,9 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
rcu_assign_pointer(event->tp_event->prog_array, new_array);
bpf_prog_array_free_sleepable(old_array);
+ if (prog->kprobe_override)
+ trace_kprobe_error_injection_control(event->tp_event, true);
+
unlock:
mutex_unlock(&bpf_event_mutex);
return ret;
@@ -2299,6 +2302,9 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
if (!event->prog)
goto unlock;
+ if (event->prog->kprobe_override)
+ trace_kprobe_error_injection_control(event->tp_event, false);
+
old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
if (ret == -ENOENT)
@@ -61,6 +61,7 @@ struct trace_kprobe {
unsigned long __percpu *nhit;
const char *symbol; /* symbol name */
struct trace_probe tp;
+ struct static_key *ei_key;
};
static bool is_trace_kprobe(struct dyn_event *ev)
@@ -235,9 +236,40 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
+	struct static_key *ei_key;
 
-	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
-	false;
+	if (!tk)
+		return false;
+
+	ei_key = get_injection_key(trace_kprobe_address(tk));
+	if (IS_ERR(ei_key)) {
+		/* Drop any key cached by a previous successful check. */
+		tk->ei_key = NULL;
+		return false;
+	}
+
+	/* Cache the key for trace_kprobe_error_injection_control(). */
+	tk->ei_key = ei_key;
+	return true;
+}
+
+/*
+ * Flip the static key guarding the error-injection callsite(s) of the
+ * probed function, if one was found by trace_kprobe_error_injectable().
+ * Called when a bpf program with kprobe_override is attached to or
+ * detached from the perf event.
+ */
+void trace_kprobe_error_injection_control(struct trace_event_call *call,
+					  bool enable)
+{
+	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
+
+	if (!tk || !tk->ei_key)
+		return;
+
+	if (enable)
+		static_key_slow_inc(tk->ei_key);
+	else
+		static_key_slow_dec(tk->ei_key);
 }
static int register_kprobe_event(struct trace_kprobe *tk);
@@ -212,6 +212,8 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
#ifdef CONFIG_KPROBE_EVENTS
bool trace_kprobe_on_func_entry(struct trace_event_call *call);
bool trace_kprobe_error_injectable(struct trace_event_call *call);
+void trace_kprobe_error_injection_control(struct trace_event_call *call,
+ bool enabled);
#else
static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
@@ -222,6 +224,9 @@ static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
return false;
}
+
+static inline void trace_kprobe_error_injection_control(struct trace_event_call *call,
+ bool enabled) { }
#endif /* CONFIG_KPROBE_EVENTS */
struct probe_arg {
Functions marked for error injection can have an associated static key that guards the callsite(s) to avoid the overhead of calling an empty function when no error injection is in progress. Outside of the error injection framework itself, bpf programs can be attached to perf events and override the results of error-injectable functions. To make sure these functions are actually called, attaching such bpf programs should control the static key accordingly. Therefore, add the static key's address to struct trace_kprobe and fill it in trace_kprobe_error_injectable(), using get_injection_key() instead of within_error_injection_list(). Introduce trace_kprobe_error_injection_control() to control the static key and call the control function when attaching or detaching programs with kprobe_override to perf events. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> --- kernel/trace/bpf_trace.c | 6 ++++++ kernel/trace/trace_kprobe.c | 30 ++++++++++++++++++++++++++++-- kernel/trace/trace_probe.h | 5 +++++ 3 files changed, 39 insertions(+), 2 deletions(-)