@@ -1239,6 +1239,29 @@ Here are the available options:
When the free_buffer is closed, tracing will
stop (tracing_on set to 0).
+ filter-buffer
+	Normally, when the filter is enabled, a temporary buffer is
+	created and the event data is copied into it so that the
+	filtering logic can be run. If the filter passes and the
+	event should be recorded, the event is copied from the
+	temporary buffer into the ring buffer. If the event is to be
+	discarded, it is simply dropped. If another event comes in
+	via an interrupt while the temporary buffer is busy, that
+	event is written directly into the ring buffer.
+
+	When this option is cleared, the temporary buffer is disabled
+	and events are always written directly into the ring buffer.
+	This avoids the copy when an event is recorded, but adds a
+	bit more overhead when an event is discarded. Also, if
+	another event interrupts an event that is about to be
+	discarded, the discarded event cannot be removed from the
+	ring buffer; instead it is converted to padding that the
+	reader will skip. Padding still takes up space in the ring
+	buffer.
+
+	Clearing this option can be beneficial if most events are
+	recorded rather than discarded, or simply for debugging the
+	discard functionality of the ring buffer.
+
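+	For example, assuming tracefs is mounted at /sys/kernel/tracing,
+	the temporary filter buffer can be turned off and back on
+	again with::
+
+	  # echo 0 > /sys/kernel/tracing/options/filter-buffer
+	  # echo 1 > /sys/kernel/tracing/options/filter-buffer
+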
irq-info
Shows the interrupt, preempt count, need resched data.
When disabled, the trace looks like::
@@ -466,7 +466,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
- TRACE_ITER_HASH_PTR)
+ TRACE_ITER_HASH_PTR | TRACE_ITER_FILTER_BUF)
/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
@@ -5398,6 +5398,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
+static int __tracing_set_filter_buffering(struct trace_array *tr, bool set);
+
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
int *map;
@@ -5451,6 +5453,9 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (mask == TRACE_ITER_FUNC_FORK)
ftrace_pid_follow_fork(tr, enabled);
+ if (mask == TRACE_ITER_FILTER_BUF)
+ __tracing_set_filter_buffering(tr, !enabled);
+
if (mask == TRACE_ITER_OVERWRITE) {
ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -7552,27 +7557,29 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
return ring_buffer_event_time_stamp(buffer, rbe);
}
-/*
- * Set or disable using the per CPU trace_buffered_event when possible.
- */
-int tracing_set_filter_buffering(struct trace_array *tr, bool set)
+static int __tracing_set_filter_buffering(struct trace_array *tr, bool set)
{
- int ret = 0;
-
- mutex_lock(&trace_types_lock);
-
if (set && tr->no_filter_buffering_ref++)
- goto out;
+ return 0;
if (!set) {
- if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
+ return -EINVAL;
--tr->no_filter_buffering_ref;
}
- out:
+ return 0;
+}
+
+/*
+ * Set or disable using the per CPU trace_buffered_event when possible.
+ */
+int tracing_set_filter_buffering(struct trace_array *tr, bool set)
+{
+ int ret;
+
+ mutex_lock(&trace_types_lock);
+ ret = __tracing_set_filter_buffering(tr, set);
mutex_unlock(&trace_types_lock);
return ret;
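(Not part of the patch: a minimal sketch of how the refcounted helper above
is meant to be paired by an in-kernel user. It assumes the declarations from
kernel/trace/trace.h; the my_feature_*() names are made up for illustration.)

static int my_feature_start(struct trace_array *tr)
{
	/* Take a reference so events bypass the temporary filter buffer. */
	return tracing_set_filter_buffering(tr, true);
}

static void my_feature_stop(struct trace_array *tr)
{
	/*
	 * Drop the reference.  The temporary buffer is used again only
	 * once every holder (including a cleared filter-buffer option,
	 * which holds a reference while cleared) has released it.
	 */
	tracing_set_filter_buffering(tr, false);
}

An unbalanced clear trips the WARN_ON_ONCE() above and returns -EINVAL, so
the start/stop calls must stay paired.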
@@ -1251,6 +1251,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(EVENT_FORK, "event-fork"), \
C(PAUSE_ON_TRACE, "pause-on-trace"), \
C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
+ C(FILTER_BUF, "filter-buffer"), \
FUNCTION_FLAGS \
FGRAPH_FLAGS \
STACK_FLAGS \