@@ -27,8 +27,9 @@
#undef __perf_task
#define __perf_task(t) (__task = (t))

-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
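+/* Common implementation; tp_flags lets the wrappers below request TRACEPOINT_MAYFAULT handling. */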
+#undef _DECLARE_EVENT_CLASS
+#define _DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print, tp_flags) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
@@ -43,13 +44,18 @@ perf_trace_##call(void *__data, proto) \
int __data_size; \
int rctx; \
\
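+ /* TRACEPOINT_MAYFAULT probes run with preemption enabled; perf \
+  * relies on per-CPU data, so disable preemption while emitting. */ \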
+ if ((tp_flags) & TRACEPOINT_MAYFAULT) \
+ preempt_disable_notrace(); \
+ \
__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
\
head = this_cpu_ptr(event_call->perf_events); \
if (!bpf_prog_array_valid(event_call) && \
__builtin_constant_p(!__task) && !__task && \
hlist_empty(head)) \
- return; \
+ goto end; \
\
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
sizeof(u64)); \
@@ -57,7 +63,7 @@ perf_trace_##call(void *__data, proto) \
\
entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \
if (!entry) \
- return; \
+ goto end; \
\
perf_fetch_caller_regs(__regs); \
\
@@ -68,8 +74,26 @@ perf_trace_##call(void *__data, proto) \
perf_trace_run_bpf_submit(entry, __entry_size, rctx, \
event_call, __count, __regs, \
head, __task); \
+end: \
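+ /* Pairs with the preempt_disable_notrace() above. */ \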
+ if ((tp_flags) & TRACEPOINT_MAYFAULT) \
+ preempt_enable_notrace(); \
}

+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ _DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), \
+ PARAMS(tstruct), PARAMS(assign), PARAMS(print), 0)
+
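+/* As DECLARE_EVENT_CLASS(), but the class's probes may take a page
+ * fault, so the perf handler must disable preemption itself. */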
+#undef DECLARE_EVENT_CLASS_MAYFAULT
+#define DECLARE_EVENT_CLASS_MAYFAULT(call, proto, args, tstruct, \
+ assign, print) \
+ _DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), \
+ PARAMS(tstruct), PARAMS(assign), PARAMS(print), \
+ TRACEPOINT_MAYFAULT)
+
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the