[v20,1/5] ring-buffer: allocate sub-buffers with __GFP_COMP

Message ID 20240406173649.3210836-2-vdonnefort@google.com
State Superseded
Series Introducing trace buffer mapping by user-space

Commit Message

Vincent Donnefort April 6, 2024, 5:36 p.m. UTC
In preparation for the ring-buffer memory mapping, allocate compound
pages for the ring-buffer sub-buffers to enable us to map them to
user-space with vm_insert_pages().

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
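[Editor's note, not part of the patch: the point of __GFP_COMP here is that each
sub-buffer becomes a single compound allocation, so its head and tail pages can
later be handed to vm_insert_pages() from the ring-buffer mmap() path that a
later patch in this series introduces. The sketch below is illustrative only;
the function name map_subbuf_sketch() and the parameters vma_addr and
subbuf_page are hypothetical and do not appear in the series.]

#include <linux/mm.h>

/*
 * Illustrative sketch: insert the 1 << order pages of one compound
 * sub-buffer into a user VMA. With __GFP_COMP the whole allocation is a
 * single compound page, so the tail pages can be safely reference-counted
 * when mapped to user-space.
 */
static int map_subbuf_sketch(struct vm_area_struct *vma, unsigned long vma_addr,
			     struct page *subbuf_page, unsigned int order)
{
	struct page *pages[8];		/* assumes order <= 3 for brevity */
	unsigned long nr = 1UL << order;
	unsigned long i;

	if (nr > ARRAY_SIZE(pages))
		return -EINVAL;

	/* Tail pages of a compound allocation follow the head page. */
	for (i = 0; i < nr; i++)
		pages[i] = subbuf_page + i;

	/* On success, all 'nr' pages are inserted starting at vma_addr. */
	return vm_insert_pages(vma, vma_addr, pages, &nr);
}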

Comments

Steven Rostedt April 10, 2024, 5:36 p.m. UTC | #1
Hi Vincent,

Thanks for sending this. Nit: Subject should start with a capital:

  ring-buffer: Allocate sub-buffers with __GFP_COMP

-- Steve


On Sat,  6 Apr 2024 18:36:45 +0100
Vincent Donnefort <vdonnefort@google.com> wrote:

> In preparation for the ring-buffer memory mapping, allocate compound
> pages for the ring-buffer sub-buffers to enable us to map them to
> user-space with vm_insert_pages().
> 
> Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
> 
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 25476ead681b..cc9ebe593571 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -1524,7 +1524,7 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
>  		list_add(&bpage->list, pages);
>  
>  		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
> -					mflags | __GFP_ZERO,
> +					mflags | __GFP_COMP | __GFP_ZERO,
>  					cpu_buffer->buffer->subbuf_order);
>  		if (!page)
>  			goto free_pages;
> @@ -1609,7 +1609,7 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
>  
>  	cpu_buffer->reader_page = bpage;
>  
> -	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
> +	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
>  				cpu_buffer->buffer->subbuf_order);
>  	if (!page)
>  		goto fail_free_reader;
> @@ -5579,7 +5579,7 @@ ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
>  		goto out;
>  
>  	page = alloc_pages_node(cpu_to_node(cpu),
> -				GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO,
> +				GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
>  				cpu_buffer->buffer->subbuf_order);
>  	if (!page) {
>  		kfree(bpage);

Patch

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 25476ead681b..cc9ebe593571 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1524,7 +1524,7 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add(&bpage->list, pages);
 
 		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-					mflags | __GFP_ZERO,
+					mflags | __GFP_COMP | __GFP_ZERO,
 					cpu_buffer->buffer->subbuf_order);
 		if (!page)
 			goto free_pages;
@@ -1609,7 +1609,7 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 
 	cpu_buffer->reader_page = bpage;
 
-	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
 				cpu_buffer->buffer->subbuf_order);
 	if (!page)
 		goto fail_free_reader;
@@ -5579,7 +5579,7 @@ ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
 		goto out;
 
 	page = alloc_pages_node(cpu_to_node(cpu),
-				GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO,
+				GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
 				cpu_buffer->buffer->subbuf_order);
 	if (!page) {
 		kfree(bpage);