
[v2,2/9] mm: vmalloc: Rename adjust_va_to_fit_type() function

Message ID 20230829081142.3619-3-urezki@gmail.com (mailing list archive)
State New
Series Mitigate a vmap lock contention v2

Commit Message

Uladzislau Rezki Aug. 29, 2023, 8:11 a.m. UTC
This patch renames the adjust_va_to_fit_type() function
to va_clip(), which is shorter and more expressive.

There is no functional change as a result of this patch.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
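
For context, a minimal, self-contained sketch of the clipping idea behind
the new name follows. It is an illustration only, not the mm/vmalloc.c
implementation: the real code works on struct vmap_area nodes kept in an
rb-tree and a list, and the struct, enum values and helper names below are
simplified stand-ins. A free area is clipped by the requested range: it is
either consumed entirely, shrunk from one edge, or split in two.

#include <stdio.h>

enum fit_type { NOTHING_FIT, FL_FIT, LE_FIT, RE_FIT, NE_FIT };

struct area {
	unsigned long start;
	unsigned long end;
};

/* Classify how [nva_start, nva_start + size) sits inside the free area. */
static enum fit_type classify(const struct area *va,
			      unsigned long nva_start, unsigned long size)
{
	if (nva_start < va->start || nva_start + size > va->end)
		return NOTHING_FIT;

	if (va->start == nva_start)
		return (va->end == nva_start + size) ? FL_FIT : LE_FIT;

	if (va->end == nva_start + size)
		return RE_FIT;

	return NE_FIT;
}

/* Clip the requested range out of the free area, possibly splitting it. */
static int clip(struct area *va, struct area *remainder,
		unsigned long nva_start, unsigned long size)
{
	switch (classify(va, nva_start, size)) {
	case FL_FIT:
		/* The request covers the whole free area: nothing is left. */
		va->start = va->end = 0;
		return 0;
	case LE_FIT:
		/* Request sits at the left edge: shrink from the left. */
		va->start = nva_start + size;
		return 0;
	case RE_FIT:
		/* Request sits at the right edge: shrink from the right. */
		va->end = nva_start;
		return 0;
	case NE_FIT:
		/* Request is in the middle: split into a left and a right part. */
		remainder->start = nva_start + size;
		remainder->end = va->end;
		va->end = nva_start;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct area va = { 0x1000, 0x9000 };
	struct area rest = { 0, 0 };

	/* Carve [0x2000, 0x4000) out of the middle: an NE_FIT style split. */
	if (!clip(&va, &rest, 0x2000, 0x2000))
		printf("left: [%#lx, %#lx) right: [%#lx, %#lx)\n",
		       va.start, va.end, rest.start, rest.end);

	return 0;
}

Running the sketch prints "left: [0x1000, 0x2000) right: [0x4000, 0x9000)",
i.e. the middle of the free area has been clipped out. The rename reflects
this operation directly: the helper clips a free area around a newly
allocated range, which "adjust to fit type" expressed less clearly.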

Comments

Baoquan He Sept. 6, 2023, 5:51 a.m. UTC | #1
On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> This patch renames the adjust_va_to_fit_type() function
> to va_clip(), which is shorter and more expressive.
> 
> There is no functional change as a result of this patch.
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> ---
>  mm/vmalloc.c | 13 ++++++-------
>  1 file changed, 6 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 00afc1ee4756..09e315f8ea34 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1382,9 +1382,9 @@ classify_va_fit_type(struct vmap_area *va,
>  }
>  
>  static __always_inline int
> -adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> -		      struct vmap_area *va, unsigned long nva_start_addr,
> -		      unsigned long size)
> +va_clip(struct rb_root *root, struct list_head *head,
> +		struct vmap_area *va, unsigned long nva_start_addr,
> +		unsigned long size)
>  {
>  	struct vmap_area *lva = NULL;
>  	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
> @@ -1500,7 +1500,7 @@ va_alloc(struct vmap_area *va,
>  		return vend;
>  
>  	/* Update the free vmap_area. */
> -	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> +	ret = va_clip(root, head, va, nva_start_addr, size);
>  	if (WARN_ON_ONCE(ret))
>  		return vend;
>  
> @@ -4151,9 +4151,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>  			/* It is a BUG(), but trigger recovery instead. */
>  			goto recovery;
>  
> -		ret = adjust_va_to_fit_type(&free_vmap_area_root,
> -					    &free_vmap_area_list,
> -					    va, start, size);
> +		ret = va_clip(&free_vmap_area_root,
> +			&free_vmap_area_list, va, start, size);
>  		if (WARN_ON_ONCE(unlikely(ret)))
>  			/* It is a BUG(), but trigger recovery instead. */
>  			goto recovery;
> -- 
> 2.30.2
> 

Reviewed-by: Baoquan He <bhe@redhat.com>
Uladzislau Rezki Sept. 6, 2023, 4:27 p.m. UTC | #2
On Wed, Sep 06, 2023 at 01:51:42PM +0800, Baoquan He wrote:
> On 08/29/23 at 10:11am, Uladzislau Rezki (Sony) wrote:
> > This patch renames the adjust_va_to_fit_type() function
> > to va_clip(), which is shorter and more expressive.
> > 
> > There is no functional change as a result of this patch.
> > 
> > Reviewed-by: Christoph Hellwig <hch@lst.de>
> > Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> > Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
> > ---
> >  mm/vmalloc.c | 13 ++++++-------
> >  1 file changed, 6 insertions(+), 7 deletions(-)
> > 
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 00afc1ee4756..09e315f8ea34 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -1382,9 +1382,9 @@ classify_va_fit_type(struct vmap_area *va,
> >  }
> >  
> >  static __always_inline int
> > -adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> > -		      struct vmap_area *va, unsigned long nva_start_addr,
> > -		      unsigned long size)
> > +va_clip(struct rb_root *root, struct list_head *head,
> > +		struct vmap_area *va, unsigned long nva_start_addr,
> > +		unsigned long size)
> >  {
> >  	struct vmap_area *lva = NULL;
> >  	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
> > @@ -1500,7 +1500,7 @@ va_alloc(struct vmap_area *va,
> >  		return vend;
> >  
> >  	/* Update the free vmap_area. */
> > -	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
> > +	ret = va_clip(root, head, va, nva_start_addr, size);
> >  	if (WARN_ON_ONCE(ret))
> >  		return vend;
> >  
> > @@ -4151,9 +4151,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> >  			/* It is a BUG(), but trigger recovery instead. */
> >  			goto recovery;
> >  
> > -		ret = adjust_va_to_fit_type(&free_vmap_area_root,
> > -					    &free_vmap_area_list,
> > -					    va, start, size);
> > +		ret = va_clip(&free_vmap_area_root,
> > +			&free_vmap_area_list, va, start, size);
> >  		if (WARN_ON_ONCE(unlikely(ret)))
> >  			/* It is a BUG(), but trigger recovery instead. */
> >  			goto recovery;
> > -- 
> > 2.30.2
> > 
> 
> Reviewed-by: Baoquan He <bhe@redhat.com>
> 
Thank you for the review. Picked it up.

--
Uladzislau Rezki

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 00afc1ee4756..09e315f8ea34 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1382,9 +1382,9 @@ classify_va_fit_type(struct vmap_area *va,
 }
 
 static __always_inline int
-adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
-		      struct vmap_area *va, unsigned long nva_start_addr,
-		      unsigned long size)
+va_clip(struct rb_root *root, struct list_head *head,
+		struct vmap_area *va, unsigned long nva_start_addr,
+		unsigned long size)
 {
 	struct vmap_area *lva = NULL;
 	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
@@ -1500,7 +1500,7 @@ va_alloc(struct vmap_area *va,
 		return vend;
 
 	/* Update the free vmap_area. */
-	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
+	ret = va_clip(root, head, va, nva_start_addr, size);
 	if (WARN_ON_ONCE(ret))
 		return vend;
 
@@ -4151,9 +4151,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;
 
-		ret = adjust_va_to_fit_type(&free_vmap_area_root,
-					    &free_vmap_area_list,
-					    va, start, size);
+		ret = va_clip(&free_vmap_area_root,
+			&free_vmap_area_list, va, start, size);
 		if (WARN_ON_ONCE(unlikely(ret)))
 			/* It is a BUG(), but trigger recovery instead. */
 			goto recovery;