
[2/3] mm/percpu.c: optimize the code in pcpu_setup_first_chunk() a little bit

Message ID 20230721131800.20003-3-bhe@redhat.com (mailing list archive)
State New
Series percpu: some trivial cleanup patches

Commit Message

Baoquan He July 21, 2023, 1:17 p.m. UTC
This removes the need for the local variable 'chunk' and simplifies the
code that calls pcpu_alloc_first_chunk() to initialize the reserved
chunk and the dynamic chunk.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/percpu.c | 32 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 19 deletions(-)
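For context on the struct_size() change in the diff below: the local variable
'chunk' only served as a type carrier for struct_size(), and the macro never
needs the pointer's value, only its type, so a cast null pointer does the same
job. A minimal sketch, with a made-up struct and helper used purely for
illustration (not from the patch):

	#include <linux/types.h>
	#include <linux/overflow.h>	/* struct_size() */

	/* hypothetical type with a flexible array member */
	struct sample {
		unsigned long nr;
		unsigned long bitmap[];
	};

	static size_t sample_bytes(unsigned int n)
	{
		/* header plus n bitmap entries, computed from the type alone;
		 * the cast null pointer is never dereferenced */
		return struct_size((struct sample *)0, bitmap, n);
	}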

Comments

Dennis Zhou July 21, 2023, 9:01 p.m. UTC | #1
Hello,

On Fri, Jul 21, 2023 at 09:17:59PM +0800, Baoquan He wrote:
> This removes the need for the local variable 'chunk' and simplifies the
> code that calls pcpu_alloc_first_chunk() to initialize the reserved
> chunk and the dynamic chunk.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/percpu.c | 32 +++++++++++++-------------------
>  1 file changed, 13 insertions(+), 19 deletions(-)
> 
> diff --git a/mm/percpu.c b/mm/percpu.c
> index 1480bf283d11..c25b058a46ad 100644
> --- a/mm/percpu.c
> +++ b/mm/percpu.c
> @@ -2581,7 +2581,6 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
>  {
>  	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
>  	size_t static_size, dyn_size;
> -	struct pcpu_chunk *chunk;
>  	unsigned long *group_offsets;
>  	size_t *group_sizes;
>  	unsigned long *unit_off;
> @@ -2697,7 +2696,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
>  	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
>  	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
>  	pcpu_atom_size = ai->atom_size;
> -	pcpu_chunk_struct_size = struct_size(chunk, populated,
> +	pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
>  					     BITS_TO_LONGS(pcpu_unit_pages));
>  
>  	pcpu_stats_save_ai(ai);
> @@ -2735,28 +2734,23 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
>  
>  	/*
>  	 * Initialize first chunk.
> -	 * If the reserved_size is non-zero, this initializes the reserved
> -	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
> -	 * and the dynamic region is initialized here.  The first chunk,
> -	 * pcpu_first_chunk, will always point to the chunk that serves
> -	 * the dynamic region.
> +	 * If the reserved_size is non-zero, initializes the reserved chunk
                                         ^initialize
> +	 * firstly. If the reserved_size is zero, the reserved chunk is NULL
        ^ can remove firstly.
> +	 * and the dynamic region is initialized directly. The first chunk,
> +	 * pcpu_first_chunk, will always point to the chunk that serves the
> +	 * dynamic region.

Reading this, I'll probably reword this comment to explain the reserved
chunk better.
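To make that direction concrete, one possible shape for a reworded comment
(only a sketch of what could be spelled out, not the actual rewording):

	/*
	 * Initialize the first chunk.
	 * The base address is laid out as < static | [reserved] | dynamic >.
	 * The static region needs no chunk.  If reserved_size is non-zero,
	 * pcpu_reserved_chunk serves the reserved region; otherwise it stays
	 * NULL.  pcpu_first_chunk always points to the chunk serving the
	 * dynamic region.
	 */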

>  	 */
>  	tmp_addr = (unsigned long)base_addr + static_size;
> -	map_size = ai->reserved_size ?: dyn_size;
> -	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> -
> -	/* init dynamic chunk if necessary */
>  	if (ai->reserved_size) {
> -		pcpu_reserved_chunk = chunk;
> -
> -		tmp_addr = (unsigned long)base_addr + static_size +
> -			   ai->reserved_size;
> -		map_size = dyn_size;
> -		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> +		map_size = ai->reserved_size;
> +		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
>  	}
>  
> -	/* link the first chunk in */
> -	pcpu_first_chunk = chunk;
> +	/* init dynamic chunk if necessary */
> +	tmp_addr += (unsigned long)ai->reserved_size;

I'm not a big fan of += the tmp_addr as I personally find it easier to
read if it's just laid out explicitly.
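A one-line sketch of the explicit form, reusing the identifiers already in the
patch (the v2 further down writes it this way):

	/* spell out the dynamic region start instead of bumping tmp_addr */
	tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;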

> +	map_size = dyn_size;
> +	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> +
>  	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
>  	pcpu_chunk_relocate(pcpu_first_chunk, -1);
>  
> -- 
> 2.34.1
> 

Overall, I think this is good, but I'd go 1 step further and get rid of
map_size. Regarding tmp_addr, I'd prefer if we kept all the math
together.
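Roughly, dropping map_size and keeping the address math together would give
something like the following inside pcpu_setup_first_chunk() (a sketch using
the identifiers from the patch, not a final version):

	tmp_addr = (unsigned long)base_addr + static_size;
	if (ai->reserved_size)
		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
							     ai->reserved_size);

	/* the dynamic chunk always exists and becomes pcpu_first_chunk */
	tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);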

Thanks,
Dennis
Baoquan He July 22, 2023, 1:14 a.m. UTC | #2
On 07/21/23 at 02:01pm, Dennis Zhou wrote:
> Hello,
> 
> On Fri, Jul 21, 2023 at 09:17:59PM +0800, Baoquan He wrote:
> > This removes the need for the local variable 'chunk' and simplifies the
> > code that calls pcpu_alloc_first_chunk() to initialize the reserved
> > chunk and the dynamic chunk.
> > 
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > ---
> >  mm/percpu.c | 32 +++++++++++++-------------------
> >  1 file changed, 13 insertions(+), 19 deletions(-)
> > 
> > diff --git a/mm/percpu.c b/mm/percpu.c
> > index 1480bf283d11..c25b058a46ad 100644
> > --- a/mm/percpu.c
> > +++ b/mm/percpu.c
> > @@ -2581,7 +2581,6 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
> >  {
> >  	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
> >  	size_t static_size, dyn_size;
> > -	struct pcpu_chunk *chunk;
> >  	unsigned long *group_offsets;
> >  	size_t *group_sizes;
> >  	unsigned long *unit_off;
> > @@ -2697,7 +2696,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
> >  	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
> >  	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
> >  	pcpu_atom_size = ai->atom_size;
> > -	pcpu_chunk_struct_size = struct_size(chunk, populated,
> > +	pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
> >  					     BITS_TO_LONGS(pcpu_unit_pages));
> >  
> >  	pcpu_stats_save_ai(ai);
> > @@ -2735,28 +2734,23 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
> >  
> >  	/*
> >  	 * Initialize first chunk.
> > -	 * If the reserved_size is non-zero, this initializes the reserved
> > -	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
> > -	 * and the dynamic region is initialized here.  The first chunk,
> > -	 * pcpu_first_chunk, will always point to the chunk that serves
> > -	 * the dynamic region.
> > +	 * If the reserved_size is non-zero, initializes the reserved chunk
>                                          ^initialize
> > +	 * firstly. If the reserved_size is zero, the reserved chunk is NULL
>         ^ can remove firstly.
> > +	 * and the dynamic region is initialized directly. The first chunk,
> > +	 * pcpu_first_chunk, will always point to the chunk that serves the
> > +	 * dynamic region.
> 
> Reading this, I'll probably reword this comment to explain the reserved
> chunk better.

Agreed. The current wording is a little messy and too colloquial.

> 
> >  	 */
> >  	tmp_addr = (unsigned long)base_addr + static_size;
> > -	map_size = ai->reserved_size ?: dyn_size;
> > -	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> > -
> > -	/* init dynamic chunk if necessary */
> >  	if (ai->reserved_size) {
> > -		pcpu_reserved_chunk = chunk;
> > -
> > -		tmp_addr = (unsigned long)base_addr + static_size +
> > -			   ai->reserved_size;
> > -		map_size = dyn_size;
> > -		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> > +		map_size = ai->reserved_size;
> > +		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> >  	}
> >  
> > -	/* link the first chunk in */
> > -	pcpu_first_chunk = chunk;
> > +	/* init dynamic chunk if necessary */
> > +	tmp_addr += (unsigned long)ai->reserved_size;
> 
> I'm not a big fan of += the tmp_addr as I personally find it easier to
> read if it's just laid out explicitly.

OK, will change.

> 
> > +	map_size = dyn_size;
> > +	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
> > +
> >  	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
> >  	pcpu_chunk_relocate(pcpu_first_chunk, -1);
> >  
> > -- 
> > 2.34.1
> > 
> 
> Overall, I think this is good, but I'd go 1 step further and get rid of
> map_size. Regarding tmp_addr, I'd prefer if we kept all the math
> together.

Makes sense. Thanks a lot for your careful review and great suggestions.

Based on your comments, I made a draft v2. Please check whether I have
addressed them correctly and whether the new change looks OK to you.

From 17832ce8a755d8327b853a18c6f1cc00c9f93e50 Mon Sep 17 00:00:00 2001
From: Baoquan He <bhe@redhat.com>
Date: Tue, 27 Jun 2023 09:33:28 +0800
Subject: [PATCH] mm/percpu.c: optimize the code in pcpu_setup_first_chunk() a
 little bit

This removes the need for the local variable 'chunk' and simplifies the
code that calls pcpu_alloc_first_chunk() to initialize the reserved
chunk and the dynamic chunk.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/percpu.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/mm/percpu.c b/mm/percpu.c
index 1480bf283d11..83fc47206680 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2581,14 +2581,12 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 {
 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	size_t static_size, dyn_size;
-	struct pcpu_chunk *chunk;
 	unsigned long *group_offsets;
 	size_t *group_sizes;
 	unsigned long *unit_off;
 	unsigned int cpu;
 	int *unit_map;
 	int group, unit, i;
-	int map_size;
 	unsigned long tmp_addr;
 	size_t alloc_size;
 
@@ -2697,7 +2695,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_atom_size = ai->atom_size;
-	pcpu_chunk_struct_size = struct_size(chunk, populated,
+	pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
 					     BITS_TO_LONGS(pcpu_unit_pages));
 
 	pcpu_stats_save_ai(ai);
@@ -2734,29 +2732,21 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	dyn_size = ai->dyn_size - (static_size - ai->static_size);
 
 	/*
-	 * Initialize first chunk.
-	 * If the reserved_size is non-zero, this initializes the reserved
-	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
-	 * and the dynamic region is initialized here.  The first chunk,
-	 * pcpu_first_chunk, will always point to the chunk that serves
-	 * the dynamic region.
+	 * Initialize first chunk:
+	 *
+	 * - If the reserved_size is non-zero, initialize the reserved
+	 *   chunk first. Otherwise, the reserved chunk is NULL.
+	 *
+	 * - The first chunk, pcpu_first_chunk, always points to the
+	 *   chunk that serves the dynamic region.
 	 */
 	tmp_addr = (unsigned long)base_addr + static_size;
-	map_size = ai->reserved_size ?: dyn_size;
-	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
-
-	/* init dynamic chunk if necessary */
-	if (ai->reserved_size) {
-		pcpu_reserved_chunk = chunk;
-
-		tmp_addr = (unsigned long)base_addr + static_size +
-			   ai->reserved_size;
-		map_size = dyn_size;
-		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
-	}
+	if (ai->reserved_size)
+		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
+						ai->reserved_size);
+	tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
+	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
 
-	/* link the first chunk in */
-	pcpu_first_chunk = chunk;
 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);

Patch

diff --git a/mm/percpu.c b/mm/percpu.c
index 1480bf283d11..c25b058a46ad 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2581,7 +2581,6 @@  void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 {
 	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	size_t static_size, dyn_size;
-	struct pcpu_chunk *chunk;
 	unsigned long *group_offsets;
 	size_t *group_sizes;
 	unsigned long *unit_off;
@@ -2697,7 +2696,7 @@  void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
 	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
 	pcpu_atom_size = ai->atom_size;
-	pcpu_chunk_struct_size = struct_size(chunk, populated,
+	pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
 					     BITS_TO_LONGS(pcpu_unit_pages));
 
 	pcpu_stats_save_ai(ai);
@@ -2735,28 +2734,23 @@  void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 	/*
 	 * Initialize first chunk.
-	 * If the reserved_size is non-zero, this initializes the reserved
-	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
-	 * and the dynamic region is initialized here.  The first chunk,
-	 * pcpu_first_chunk, will always point to the chunk that serves
-	 * the dynamic region.
+	 * If the reserved_size is non-zero, initializes the reserved chunk
+	 * firstly. If the reserved_size is zero, the reserved chunk is NULL
+	 * and the dynamic region is initialized directly. The first chunk,
+	 * pcpu_first_chunk, will always point to the chunk that serves the
+	 * dynamic region.
 	 */
 	tmp_addr = (unsigned long)base_addr + static_size;
-	map_size = ai->reserved_size ?: dyn_size;
-	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
-
-	/* init dynamic chunk if necessary */
 	if (ai->reserved_size) {
-		pcpu_reserved_chunk = chunk;
-
-		tmp_addr = (unsigned long)base_addr + static_size +
-			   ai->reserved_size;
-		map_size = dyn_size;
-		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
+		map_size = ai->reserved_size;
+		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
 	}
 
-	/* link the first chunk in */
-	pcpu_first_chunk = chunk;
+	/* init dynamic chunk if necessary */
+	tmp_addr += (unsigned long)ai->reserved_size;
+	map_size = dyn_size;
+	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
+
 	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
 	pcpu_chunk_relocate(pcpu_first_chunk, -1);