
[v19,RESEND,3/5] tracing: Allow user-space mapping of the ring-buffer

Message ID 20240326100830.1326610-4-vdonnefort@google.com (mailing list archive)
State New

Commit Message

Vincent Donnefort March 26, 2024, 10:08 a.m. UTC
Currently, user-space extracts data from the ring-buffer via splice,
which is handy for storage or network sharing. However, due to splice
limitations, it is impossible to do real-time analysis without a copy.

A solution for that problem is to let the user-space map the ring-buffer
directly.

The mapping is exposed via the per-CPU file trace_pipe_raw. The first
element of the mapping is the meta-page. It is followed by the
subbuffers constituting the ring-buffer, ordered by their unique page IDs:

  * Meta-page -- see include/uapi/linux/trace_mmap.h for a description
  * Subbuf ID 0
  * Subbuf ID 1
     ...

It is therefore easy to translate a subbuf ID into an offset in the
mapping:

  reader_id = meta->reader.id;
  reader_offset = meta->meta_page_size + reader_id * meta->subbuf_size;

When new data is available, the mapper must call the newly introduced ioctl
TRACE_MMAP_IOCTL_GET_READER. This updates the meta-page reader ID to point
to the next sub-buffer containing unread data.
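
For illustration, a minimal user-space sketch of the flow above (not part
of this patch; the tracefs path and the initial single-page probe of the
meta-page are assumptions, and error handling is elided):

  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  #include <linux/trace_mmap.h>

  int main(void)
  {
          struct trace_buffer_meta *meta;
          unsigned long len;
          void *map, *reader;
          int fd;

          /* Without O_NONBLOCK, TRACE_MMAP_IOCTL_GET_READER waits for data. */
          fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
                    O_RDONLY | O_NONBLOCK);
          if (fd < 0)
                  exit(EXIT_FAILURE);

          /* Probe the meta-page to learn the total size of the mapping. */
          meta = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
          if (meta == MAP_FAILED)
                  exit(EXIT_FAILURE);

          len = meta->meta_page_size +
                (unsigned long)meta->nr_subbufs * meta->subbuf_size;
          munmap(meta, getpagesize());

          /* Map the meta-page and all the sub-buffers in one go. */
          map = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
          if (map == MAP_FAILED)
                  exit(EXIT_FAILURE);
          meta = map;

          /* Get the reader sub-buffer and locate it with the formula above. */
          if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER) == 0) {
                  reader = map + meta->meta_page_size +
                           meta->reader.id * meta->subbuf_size;
                  printf("reader sub-buffer at offset %zu\n",
                         (size_t)(reader - map));
          }

          munmap(map, len);
          close(fd);

          return 0;
  }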

Mapping will prevent snapshot and buffer size modifications.

CC: <linux-mm@kvack.org>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>

Comments

Steven Rostedt March 29, 2024, 6:40 p.m. UTC | #1
On Tue, 26 Mar 2024 10:08:28 +0000
Vincent Donnefort <vdonnefort@google.com> wrote:

> Currently, user-space extracts data from the ring-buffer via splice,
> which is handy for storage or network sharing. However, due to splice
> limitations, it is impossible to do real-time analysis without a copy.
> 
> A solution for that problem is to let the user-space map the ring-buffer
> directly.
> 
> The mapping is exposed via the per-CPU file trace_pipe_raw. The first
> element of the mapping is the meta-page. It is followed by the
> subbuffers constituting the ring-buffer, ordered by their unique page IDs:
> 
>   * Meta-page -- see include/uapi/linux/trace_mmap.h for a description
>   * Subbuf ID 0
>   * Subbuf ID 1
>      ...
> 
> It is therefore easy to translate a subbuf ID into an offset in the
> mapping:
> 
>   reader_id = meta->reader.id;
>   reader_offset = meta->meta_page_size + reader_id * meta->subbuf_size;
> 
> When new data is available, the mapper must call the newly introduced ioctl
> TRACE_MMAP_IOCTL_GET_READER. This updates the meta-page reader ID to point
> to the next sub-buffer containing unread data.
> 

Thanks for the update Vincent!

> Mapping will prevent snapshot and buffer size modifications.
> 
> CC: <linux-mm@kvack.org>
> Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
> 
> diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
> index ffcd8dfcaa4f..d25b9d504a7c 100644
> --- a/include/uapi/linux/trace_mmap.h
> +++ b/include/uapi/linux/trace_mmap.h
> @@ -43,4 +43,6 @@ struct trace_buffer_meta {
>  	__u64	Reserved2;
>  };
>  
> +#define TRACE_MMAP_IOCTL_GET_READER		_IO('T', 0x1)
> +
>  #endif /* _TRACE_MMAP_H_ */
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 233d1af39fff..0f37aa9860fd 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -1191,6 +1191,12 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
>  		return;
>  	}
>  
> +	if (tr->mapped) {
> +		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
> +		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
> +		return;
> +	}
> +
>  	local_irq_save(flags);
>  	update_max_tr(tr, current, smp_processor_id(), cond_data);
>  	local_irq_restore(flags);
> @@ -1323,7 +1329,7 @@ static int tracing_arm_snapshot_locked(struct trace_array *tr)
>  	lockdep_assert_held(&trace_types_lock);
>  
>  	spin_lock(&tr->snapshot_trigger_lock);
> -	if (tr->snapshot == UINT_MAX) {
> +	if (tr->snapshot == UINT_MAX || tr->mapped) {
>  		spin_unlock(&tr->snapshot_trigger_lock);
>  		return -EBUSY;
>  	}
> @@ -6068,7 +6074,7 @@ static void tracing_set_nop(struct trace_array *tr)
>  {
>  	if (tr->current_trace == &nop_trace)
>  		return;
> -	
> +
>  	tr->current_trace->enabled--;
>  
>  	if (tr->current_trace->reset)
> @@ -8194,15 +8200,32 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
>  	return ret;
>  }
>  
> -/* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
>  static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
>  {
>  	struct ftrace_buffer_info *info = file->private_data;
>  	struct trace_iterator *iter = &info->iter;
> +	int err;
> +
> +	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
> +		if (!(file->f_flags & O_NONBLOCK)) {
> +			err = ring_buffer_wait(iter->array_buffer->buffer,
> +					       iter->cpu_file,
> +					       iter->tr->buffer_percent,
> +					       NULL, NULL);
> +			if (err)
> +				return err;
> +		}
>  
> -	if (cmd)
> -		return -ENOIOCTLCMD;
> +		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
> +						  iter->cpu_file);
> +	} else if (cmd) {
> +		return -ENOTTY;
> +	}
>  
> +	/*
> +	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
> +	 * waiters
> +	 */
>  	mutex_lock(&trace_types_lock);
>  
>  	/* Make sure the waiters see the new wait_index */
> @@ -8214,6 +8237,94 @@ static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned
>  	return 0;
>  }
>  
> +static vm_fault_t tracing_buffers_mmap_fault(struct vm_fault *vmf)
> +{
> +	return VM_FAULT_SIGBUS;
> +}

If this is all it does, I don't believe it's needed.

> +
> +#ifdef CONFIG_TRACER_MAX_TRACE
> +static int get_snapshot_map(struct trace_array *tr)
> +{
> +	int err = 0;
> +
> +	/*
> +	 * Called with mmap_lock held. lockdep would be unhappy if we would now
> +	 * take trace_types_lock. Instead use the specific
> +	 * snapshot_trigger_lock.
> +	 */
> +	spin_lock(&tr->snapshot_trigger_lock);
> +
> +	if (tr->snapshot || tr->mapped == UINT_MAX)
> +		err = -EBUSY;
> +	else
> +		tr->mapped++;
> +
> +	spin_unlock(&tr->snapshot_trigger_lock);
> +
> +	/* Wait for update_max_tr() to observe iter->tr->mapped */
> +	if (tr->mapped == 1)
> +		synchronize_rcu();
> +
> +	return err;
> +
> +}
> +static void put_snapshot_map(struct trace_array *tr)
> +{
> +	spin_lock(&tr->snapshot_trigger_lock);
> +	if (!WARN_ON(!tr->mapped))
> +		tr->mapped--;
> +	spin_unlock(&tr->snapshot_trigger_lock);
> +}
> +#else
> +static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
> +static inline void put_snapshot_map(struct trace_array *tr) { }
> +#endif
> +
> +static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
> +{
> +	struct ftrace_buffer_info *info = vma->vm_file->private_data;
> +	struct trace_iterator *iter = &info->iter;
> +
> +	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
> +	put_snapshot_map(iter->tr);
> +}
> +
> +static void tracing_buffers_mmap_open(struct vm_area_struct *vma) { }

Same for the open.


> +
> +static const struct vm_operations_struct tracing_buffers_vmops = {
> +	.open		= tracing_buffers_mmap_open,
> +	.close		= tracing_buffers_mmap_close,
> +	.fault		= tracing_buffers_mmap_fault,
> +};

I replaced this with:

static const struct vm_operations_struct tracing_buffers_vmops = {
	.close		= tracing_buffers_mmap_close,
};

And it appears to work just fine. The mm code handles the NULL cases for
.open and .fault.

Is there any reason to do something different than the mm defaults?

-- Steve

> +
> +static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	struct ftrace_buffer_info *info = filp->private_data;
> +	struct trace_iterator *iter = &info->iter;
> +	int ret = 0;
> +
> +	if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
> +	    !(vma->vm_flags & VM_MAYSHARE))
> +		return -EPERM;
> +
> +	vm_flags_mod(vma,
> +		     VM_MIXEDMAP | VM_PFNMAP |
> +		     VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO,
> +		     VM_MAYWRITE);
> +
> +	vma->vm_ops = &tracing_buffers_vmops;
> +
> +	ret = get_snapshot_map(iter->tr);
> +	if (ret)
> +		return ret;
> +
> +	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
> +	if (ret)
> +		put_snapshot_map(iter->tr);
> +
> +	return ret;
> +}
> +
>  static const struct file_operations tracing_buffers_fops = {
>  	.open		= tracing_buffers_open,
>  	.read		= tracing_buffers_read,
> @@ -8223,6 +8334,7 @@ static const struct file_operations tracing_buffers_fops = {
>  	.splice_read	= tracing_buffers_splice_read,
>  	.unlocked_ioctl = tracing_buffers_ioctl,
>  	.llseek		= no_llseek,
> +	.mmap		= tracing_buffers_mmap,
>  };
>  
>  static ssize_t
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index 64450615ca0c..749a182dab48 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -336,6 +336,7 @@ struct trace_array {
>  	bool			allocated_snapshot;
>  	spinlock_t		snapshot_trigger_lock;
>  	unsigned int		snapshot;
> +	unsigned int		mapped;
>  	unsigned long		max_latency;
>  #ifdef CONFIG_FSNOTIFY
>  	struct dentry		*d_max_latency;
Steven Rostedt April 3, 2024, 2:13 p.m. UTC | #2
On Fri, 29 Mar 2024 14:40:55 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:

> > +static vm_fault_t tracing_buffers_mmap_fault(struct vm_fault *vmf)
> > +{
> > +	return VM_FAULT_SIGBUS;
> > +}  
> 
> If this is all it does, I don't believe it's needed.
> 
> > +
> > +#ifdef CONFIG_TRACER_MAX_TRACE
> > +static int get_snapshot_map(struct trace_array *tr)
> > +{
> > +	int err = 0;
> > +
> > +	/*
> > +	 * Called with mmap_lock held. lockdep would be unhappy if we would now
> > +	 * take trace_types_lock. Instead use the specific
> > +	 * snapshot_trigger_lock.
> > +	 */
> > +	spin_lock(&tr->snapshot_trigger_lock);
> > +
> > +	if (tr->snapshot || tr->mapped == UINT_MAX)
> > +		err = -EBUSY;
> > +	else
> > +		tr->mapped++;
> > +
> > +	spin_unlock(&tr->snapshot_trigger_lock);
> > +
> > +	/* Wait for update_max_tr() to observe iter->tr->mapped */
> > +	if (tr->mapped == 1)
> > +		synchronize_rcu();
> > +
> > +	return err;
> > +
> > +}
> > +static void put_snapshot_map(struct trace_array *tr)
> > +{
> > +	spin_lock(&tr->snapshot_trigger_lock);
> > +	if (!WARN_ON(!tr->mapped))
> > +		tr->mapped--;
> > +	spin_unlock(&tr->snapshot_trigger_lock);
> > +}
> > +#else
> > +static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
> > +static inline void put_snapshot_map(struct trace_array *tr) { }
> > +#endif
> > +
> > +static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
> > +{
> > +	struct ftrace_buffer_info *info = vma->vm_file->private_data;
> > +	struct trace_iterator *iter = &info->iter;
> > +
> > +	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
> > +	put_snapshot_map(iter->tr);
> > +}
> > +
> > +static void tracing_buffers_mmap_open(struct vm_area_struct *vma) { }  
> 
> Same for the open.
> 
> 
> > +
> > +static const struct vm_operations_struct tracing_buffers_vmops = {
> > +	.open		= tracing_buffers_mmap_open,
> > +	.close		= tracing_buffers_mmap_close,
> > +	.fault		= tracing_buffers_mmap_fault,
> > +};  
> 
> I replaced this with:
> 
> static const struct vm_operations_struct tracing_buffers_vmops = {
> 	.close		= tracing_buffers_mmap_close,
> };
> 
> And it appears to work just fine. The mm code handles the NULL cases for
> .open and .fault.
> 
> Is there any reason to do something different than the mm defaults?

Hi Vincent,

Do you plan on sending out a v20 series?

-- Steve
Vincent Donnefort April 3, 2024, 2:39 p.m. UTC | #3
On Wed, Apr 03, 2024 at 10:13:52AM -0400, Steven Rostedt wrote:
> On Fri, 29 Mar 2024 14:40:55 -0400
> Steven Rostedt <rostedt@goodmis.org> wrote:
> 
> > > +static vm_fault_t tracing_buffers_mmap_fault(struct vm_fault *vmf)
> > > +{
> > > +	return VM_FAULT_SIGBUS;
> > > +}  
> > 
> > If this is all it does, I don't believe it's needed.
> > 
> > > +
> > > +#ifdef CONFIG_TRACER_MAX_TRACE
> > > +static int get_snapshot_map(struct trace_array *tr)
> > > +{
> > > +	int err = 0;
> > > +
> > > +	/*
> > > +	 * Called with mmap_lock held. lockdep would be unhappy if we would now
> > > +	 * take trace_types_lock. Instead use the specific
> > > +	 * snapshot_trigger_lock.
> > > +	 */
> > > +	spin_lock(&tr->snapshot_trigger_lock);
> > > +
> > > +	if (tr->snapshot || tr->mapped == UINT_MAX)
> > > +		err = -EBUSY;
> > > +	else
> > > +		tr->mapped++;
> > > +
> > > +	spin_unlock(&tr->snapshot_trigger_lock);
> > > +
> > > +	/* Wait for update_max_tr() to observe iter->tr->mapped */
> > > +	if (tr->mapped == 1)
> > > +		synchronize_rcu();
> > > +
> > > +	return err;
> > > +
> > > +}
> > > +static void put_snapshot_map(struct trace_array *tr)
> > > +{
> > > +	spin_lock(&tr->snapshot_trigger_lock);
> > > +	if (!WARN_ON(!tr->mapped))
> > > +		tr->mapped--;
> > > +	spin_unlock(&tr->snapshot_trigger_lock);
> > > +}
> > > +#else
> > > +static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
> > > +static inline void put_snapshot_map(struct trace_array *tr) { }
> > > +#endif
> > > +
> > > +static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
> > > +{
> > > +	struct ftrace_buffer_info *info = vma->vm_file->private_data;
> > > +	struct trace_iterator *iter = &info->iter;
> > > +
> > > +	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
> > > +	put_snapshot_map(iter->tr);
> > > +}
> > > +
> > > +static void tracing_buffers_mmap_open(struct vm_area_struct *vma) { }  
> > 
> > Same for the open.
> > 
> > 
> > > +
> > > +static const struct vm_operations_struct tracing_buffers_vmops = {
> > > +	.open		= tracing_buffers_mmap_open,
> > > +	.close		= tracing_buffers_mmap_close,
> > > +	.fault		= tracing_buffers_mmap_fault,
> > > +};  
> > 
> > I replaced this with:
> > 
> > static const struct vm_operations_struct tracing_buffers_vmops = {
> > 	.close		= tracing_buffers_mmap_close,
> > };
> > 
> > And it appears to work just fine. The mm code handles the NULL cases for
> > .open and .fault.
> > 
> > Is there any reason to do something different than the mm defaults?

No other reason here than my own ignorance. I will remove.

> 
> Hi Vincent,
> 
> Do you plan on sending out a v20 series?

Of course, let me spin that this week! I also have a few typos to fix in the
doc and, I believe, an include missing for riscv.

> 
> -- Steve
Steven Rostedt April 3, 2024, 4:03 p.m. UTC | #4
On Wed, 3 Apr 2024 15:39:44 +0100
Vincent Donnefort <vdonnefort@google.com> wrote:

> > Do you plan on sending out a v20 series?  
> 
> Of course, let me spin that this week! I also have a few typos to fix in the
> doc and, I believe, an include missing for riscv.

No rush, I'll be on PTO until next Tuesday, and will not get to it before then.

-- Steve

Patch

diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
index ffcd8dfcaa4f..d25b9d504a7c 100644
--- a/include/uapi/linux/trace_mmap.h
+++ b/include/uapi/linux/trace_mmap.h
@@ -43,4 +43,6 @@ struct trace_buffer_meta {
 	__u64	Reserved2;
 };
 
+#define TRACE_MMAP_IOCTL_GET_READER		_IO('T', 0x1)
+
 #endif /* _TRACE_MMAP_H_ */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 233d1af39fff..0f37aa9860fd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1191,6 +1191,12 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
 		return;
 	}
 
+	if (tr->mapped) {
+		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
+		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
+		return;
+	}
+
 	local_irq_save(flags);
 	update_max_tr(tr, current, smp_processor_id(), cond_data);
 	local_irq_restore(flags);
@@ -1323,7 +1329,7 @@ static int tracing_arm_snapshot_locked(struct trace_array *tr)
 	lockdep_assert_held(&trace_types_lock);
 
 	spin_lock(&tr->snapshot_trigger_lock);
-	if (tr->snapshot == UINT_MAX) {
+	if (tr->snapshot == UINT_MAX || tr->mapped) {
 		spin_unlock(&tr->snapshot_trigger_lock);
 		return -EBUSY;
 	}
@@ -6068,7 +6074,7 @@ static void tracing_set_nop(struct trace_array *tr)
 {
 	if (tr->current_trace == &nop_trace)
 		return;
-	
+
 	tr->current_trace->enabled--;
 
 	if (tr->current_trace->reset)
@@ -8194,15 +8200,32 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	return ret;
 }
 
-/* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct ftrace_buffer_info *info = file->private_data;
 	struct trace_iterator *iter = &info->iter;
+	int err;
+
+	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
+		if (!(file->f_flags & O_NONBLOCK)) {
+			err = ring_buffer_wait(iter->array_buffer->buffer,
+					       iter->cpu_file,
+					       iter->tr->buffer_percent,
+					       NULL, NULL);
+			if (err)
+				return err;
+		}
 
-	if (cmd)
-		return -ENOIOCTLCMD;
+		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
+						  iter->cpu_file);
+	} else if (cmd) {
+		return -ENOTTY;
+	}
 
+	/*
+	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
+	 * waiters
+	 */
 	mutex_lock(&trace_types_lock);
 
 	/* Make sure the waiters see the new wait_index */
@@ -8214,6 +8237,94 @@ static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned
 	return 0;
 }
 
+static vm_fault_t tracing_buffers_mmap_fault(struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+static int get_snapshot_map(struct trace_array *tr)
+{
+	int err = 0;
+
+	/*
+	 * Called with mmap_lock held. lockdep would be unhappy if we would now
+	 * take trace_types_lock. Instead use the specific
+	 * snapshot_trigger_lock.
+	 */
+	spin_lock(&tr->snapshot_trigger_lock);
+
+	if (tr->snapshot || tr->mapped == UINT_MAX)
+		err = -EBUSY;
+	else
+		tr->mapped++;
+
+	spin_unlock(&tr->snapshot_trigger_lock);
+
+	/* Wait for update_max_tr() to observe iter->tr->mapped */
+	if (tr->mapped == 1)
+		synchronize_rcu();
+
+	return err;
+
+}
+static void put_snapshot_map(struct trace_array *tr)
+{
+	spin_lock(&tr->snapshot_trigger_lock);
+	if (!WARN_ON(!tr->mapped))
+		tr->mapped--;
+	spin_unlock(&tr->snapshot_trigger_lock);
+}
+#else
+static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
+static inline void put_snapshot_map(struct trace_array *tr) { }
+#endif
+
+static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
+{
+	struct ftrace_buffer_info *info = vma->vm_file->private_data;
+	struct trace_iterator *iter = &info->iter;
+
+	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
+	put_snapshot_map(iter->tr);
+}
+
+static void tracing_buffers_mmap_open(struct vm_area_struct *vma) { }
+
+static const struct vm_operations_struct tracing_buffers_vmops = {
+	.open		= tracing_buffers_mmap_open,
+	.close		= tracing_buffers_mmap_close,
+	.fault		= tracing_buffers_mmap_fault,
+};
+
+static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct ftrace_buffer_info *info = filp->private_data;
+	struct trace_iterator *iter = &info->iter;
+	int ret = 0;
+
+	if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
+	    !(vma->vm_flags & VM_MAYSHARE))
+		return -EPERM;
+
+	vm_flags_mod(vma,
+		     VM_MIXEDMAP | VM_PFNMAP |
+		     VM_DONTCOPY | VM_DONTDUMP | VM_DONTEXPAND | VM_IO,
+		     VM_MAYWRITE);
+
+	vma->vm_ops = &tracing_buffers_vmops;
+
+	ret = get_snapshot_map(iter->tr);
+	if (ret)
+		return ret;
+
+	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
+	if (ret)
+		put_snapshot_map(iter->tr);
+
+	return ret;
+}
+
 static const struct file_operations tracing_buffers_fops = {
 	.open		= tracing_buffers_open,
 	.read		= tracing_buffers_read,
@@ -8223,6 +8334,7 @@ static const struct file_operations tracing_buffers_fops = {
 	.splice_read	= tracing_buffers_splice_read,
 	.unlocked_ioctl = tracing_buffers_ioctl,
 	.llseek		= no_llseek,
+	.mmap		= tracing_buffers_mmap,
 };
 
 static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 64450615ca0c..749a182dab48 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -336,6 +336,7 @@ struct trace_array {
 	bool			allocated_snapshot;
 	spinlock_t		snapshot_trigger_lock;
 	unsigned int		snapshot;
+	unsigned int		mapped;
 	unsigned long		max_latency;
 #ifdef CONFIG_FSNOTIFY
 	struct dentry		*d_max_latency;