diff mbox series

[5/5] mm/vmalloc.c: return explicit error value in alloc_vmap_area()

Message ID 20250415023952.27850-6-bhe@redhat.com (mailing list archive)
State New
Headers show
Series mm/vmalloc.c: code cleanup and improvements | expand

Commit Message

Baoquan He April 15, 2025, 2:39 a.m. UTC
In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
indicate if the allocation is successful or failed. That is not very clear.

Here change to return explicit error values and check them to judge if
allocation is successful.

IS_ERR_VALUE already uses unlikely() internally

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/vmalloc.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

Comments

Baoquan He April 15, 2025, 6:44 a.m. UTC | #1
On 04/15/25 at 10:39am, Baoquan He wrote:
> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
> indicate if the allocation is successful or failed. That is not very clear.
> 
> Here change to return explicit error values and check them to judge if
> allocation is successful.
> 

> IS_ERR_VALUE already uses unlikely() internally
  ^^^^^^^^^
Sorry, above line was added mistakenly in log draft, should be removed.

> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/vmalloc.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 3f38a232663b..5b21cd09b2b4 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>  			 */
>  			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
>  			if (!lva)
> -				return -1;
> +				return -ENOMEM;
>  		}
>  
>  		/*
> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>  		 */
>  		va->va_start = nva_start_addr + size;
>  	} else {
> -		return -1;
> +		return -EINVAL;
>  	}
>  
>  	if (type != FL_FIT_TYPE) {
> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
>  
>  	/* Check the "vend" restriction. */
>  	if (nva_start_addr + size > vend)
> -		return vend;
> +		return -ERANGE;
>  
>  	/* Update the free vmap_area. */
>  	ret = va_clip(root, head, va, nva_start_addr, size);
> -	if (WARN_ON_ONCE(ret))
> -		return vend;
> +	if (ret)
> +		return ret;
>  
>  	return nva_start_addr;
>  }
>  
>  /*
>   * Returns a start address of the newly allocated area, if success.
> - * Otherwise a vend is returned that indicates failure.
> + * Otherwise an error value is returned that indicates failure.
>   */
>  static __always_inline unsigned long
>  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  
>  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
>  	if (unlikely(!va))
> -		return vend;
> +		return -ENOENT;
>  
>  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> -	if (nva_start_addr == vend)
> -		return vend;
>  
>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> -	find_vmap_lowest_match_check(root, head, size, align);
> +	if (!IS_ERR_VALUE(nva_start_addr))
> +		find_vmap_lowest_match_check(root, head, size, align);
>  #endif
>  
>  	return nva_start_addr;
> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
>  	struct vmap_area *va;
>  
>  	*vn_id = 0;
> -	*addr = vend;
> +	*addr = -EINVAL;
>  
>  	/*
>  	 * Fallback to a global heap if not vmalloc or there
> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	}
>  
>  retry:
> -	if (addr == vend) {
> +	if (IS_ERR_VALUE(addr)) {
>  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
>  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
>  			size, align, vstart, vend);
>  		spin_unlock(&free_vmap_area_lock);
>  	}
>  
> -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
>  
>  	/*
> -	 * If an allocation fails, the "vend" address is
> +	 * If an allocation fails, the error value is
>  	 * returned. Therefore trigger the overflow path.
>  	 */
> -	if (unlikely(addr == vend))
> +	if (IS_ERR_VALUE(addr))
>  		goto overflow;
>  
>  	va->va_start = addr;
> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>  
>  		ret = va_clip(&free_vmap_area_root,
>  			&free_vmap_area_list, va, start, size);
> -		if (WARN_ON_ONCE(unlikely(ret)))
> -			/* It is a BUG(), but trigger recovery instead. */
> +		if ((unlikely(ret))) {
> +			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
>  			goto recovery;
> +		}
>  
>  		/* Allocated area. */
>  		va = vas[area];
> -- 
> 2.41.0
>
Shivank Garg April 15, 2025, 7:22 a.m. UTC | #2
On 4/15/2025 8:09 AM, Baoquan He wrote:
> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
> indicate if the allocation is successful or failed. That is not very clear.
> 
> Here change to return explicit error values and check them to judge if
> allocation is successful.
> 
> IS_ERR_VALUE already uses unlikely() internally
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>
> ---
>  mm/vmalloc.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 3f38a232663b..5b21cd09b2b4 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>  			 */
>  			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
>  			if (!lva)
> -				return -1;
> +				return -ENOMEM;
>  		}
>  
>  		/*
> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>  		 */
>  		va->va_start = nva_start_addr + size;
>  	} else {
> -		return -1;
> +		return -EINVAL;
>  	}

Braces around return -EINVAL seem unnecessary.
They can be dropped.

>  
>  	if (type != FL_FIT_TYPE) {
> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
>  
>  	/* Check the "vend" restriction. */
>  	if (nva_start_addr + size > vend)
> -		return vend;
> +		return -ERANGE;
>  
>  	/* Update the free vmap_area. */
>  	ret = va_clip(root, head, va, nva_start_addr, size);
> -	if (WARN_ON_ONCE(ret))
> -		return vend;
> +	if (ret)
> +		return ret;

Is it safe to remove the warning, or was it critical for debugging?

>  
>  	return nva_start_addr;
>  }
>  
>  /*
>   * Returns a start address of the newly allocated area, if success.
> - * Otherwise a vend is returned that indicates failure.
> + * Otherwise an error value is returned that indicates failure.
>   */
>  static __always_inline unsigned long
>  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>  
>  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
>  	if (unlikely(!va))
> -		return vend;
> +		return -ENOENT;
>  
>  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> -	if (nva_start_addr == vend)
> -		return vend;
>  
>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> -	find_vmap_lowest_match_check(root, head, size, align);
> +	if (!IS_ERR_VALUE(nva_start_addr))
> +		find_vmap_lowest_match_check(root, head, size, align);
>  #endif
>  
>  	return nva_start_addr;
> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
>  	struct vmap_area *va;
>  
>  	*vn_id = 0;
> -	*addr = vend;
> +	*addr = -EINVAL;
>  
>  	/*
>  	 * Fallback to a global heap if not vmalloc or there
> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	}
>  
>  retry:
> -	if (addr == vend) {
> +	if (IS_ERR_VALUE(addr)) {
>  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
>  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
>  			size, align, vstart, vend);
>  		spin_unlock(&free_vmap_area_lock);
>  	}
>  
> -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
>  
>  	/*
> -	 * If an allocation fails, the "vend" address is
> +	 * If an allocation fails, the error value is
>  	 * returned. Therefore trigger the overflow path.
>  	 */
> -	if (unlikely(addr == vend))
> +	if (IS_ERR_VALUE(addr))
>  		goto overflow;
>  
>  	va->va_start = addr;
> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>  
>  		ret = va_clip(&free_vmap_area_root,
>  			&free_vmap_area_list, va, start, size);
> -		if (WARN_ON_ONCE(unlikely(ret)))
> -			/* It is a BUG(), but trigger recovery instead. */
> +		if ((unlikely(ret))) {
		    ^^		   ^^
The extra parentheses are redundant and can be removed for clarity.

> +			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
>  			goto recovery;
> +		}
>  
>  		/* Allocated area. */
>  		va = vas[area];
Baoquan He April 15, 2025, 1:01 p.m. UTC | #3
On 04/15/25 at 12:52pm, Shivank Garg wrote:
> On 4/15/2025 8:09 AM, Baoquan He wrote:
> > In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
> > indicate if the allocation is successful or failed. That is not very clear.
> > 
> > Here change to return explicit error values and check them to judge if
> > allocation is successful.
> > 
> > IS_ERR_VALUE already uses unlikely() internally
> > 
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> > ---
> >  mm/vmalloc.c | 34 +++++++++++++++++-----------------
> >  1 file changed, 17 insertions(+), 17 deletions(-)
> > 
> > diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> > index 3f38a232663b..5b21cd09b2b4 100644
> > --- a/mm/vmalloc.c
> > +++ b/mm/vmalloc.c
> > @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> >  			 */
> >  			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
> >  			if (!lva)
> > -				return -1;
> > +				return -ENOMEM;
> >  		}
> >  
> >  		/*
> > @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> >  		 */
> >  		va->va_start = nva_start_addr + size;
> >  	} else {
> > -		return -1;
> > +		return -EINVAL;
> >  	}

Thanks for reviewing.

> 
> Braces around return -EINVAL seem unnecessary.
> They can be dropped.

This complies with the coding style required in 3) Placing Braces and
Spaces of Documentation/process/coding-style.rst because the other
branches contain multiple statements.

> 
> >  
> >  	if (type != FL_FIT_TYPE) {
> > @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
> >  
> >  	/* Check the "vend" restriction. */
> >  	if (nva_start_addr + size > vend)
> > -		return vend;
> > +		return -ERANGE;
> >  
> >  	/* Update the free vmap_area. */
> >  	ret = va_clip(root, head, va, nva_start_addr, size);
> > -	if (WARN_ON_ONCE(ret))
> > -		return vend;
> > +	if (ret)
> > +		return ret;
> 
> Is it safe to remove the warning, or was it critical for debugging?

This comes from a reported concern because va_clip() could fail due to
NOTHING_FIT or a kmem_cache_alloc failure. The warning here could cause
confusion, misleading people into thinking vmap area management has failed.

> 
> >  
> >  	return nva_start_addr;
> >  }
> >  
> >  /*
> >   * Returns a start address of the newly allocated area, if success.
> > - * Otherwise a vend is returned that indicates failure.
> > + * Otherwise an error value is returned that indicates failure.
> >   */
> >  static __always_inline unsigned long
> >  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> > @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >  
> >  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
> >  	if (unlikely(!va))
> > -		return vend;
> > +		return -ENOENT;
> >  
> >  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> > -	if (nva_start_addr == vend)
> > -		return vend;
> >  
> >  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> > -	find_vmap_lowest_match_check(root, head, size, align);
> > +	if (!IS_ERR_VALUE(nva_start_addr))
> > +		find_vmap_lowest_match_check(root, head, size, align);
> >  #endif
> >  
> >  	return nva_start_addr;
> > @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
> >  	struct vmap_area *va;
> >  
> >  	*vn_id = 0;
> > -	*addr = vend;
> > +	*addr = -EINVAL;
> >  
> >  	/*
> >  	 * Fallback to a global heap if not vmalloc or there
> > @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> >  	}
> >  
> >  retry:
> > -	if (addr == vend) {
> > +	if (IS_ERR_VALUE(addr)) {
> >  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
> >  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> >  			size, align, vstart, vend);
> >  		spin_unlock(&free_vmap_area_lock);
> >  	}
> >  
> > -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> > +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
> >  
> >  	/*
> > -	 * If an allocation fails, the "vend" address is
> > +	 * If an allocation fails, the error value is
> >  	 * returned. Therefore trigger the overflow path.
> >  	 */
> > -	if (unlikely(addr == vend))
> > +	if (IS_ERR_VALUE(addr))
> >  		goto overflow;
> >  
> >  	va->va_start = addr;
> > @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> >  
> >  		ret = va_clip(&free_vmap_area_root,
> >  			&free_vmap_area_list, va, start, size);
> > -		if (WARN_ON_ONCE(unlikely(ret)))
> > -			/* It is a BUG(), but trigger recovery instead. */
> > +		if ((unlikely(ret))) {
> 		    ^^		   ^^
> The extra parentheses are redundant and can be removed for clarity.

You are right, I will remove it. Thanks.

> 
> > +			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
> >  			goto recovery;
> > +		}
> >  
> >  		/* Allocated area. */
> >  		va = vas[area];
>
Shivank Garg April 15, 2025, 7 p.m. UTC | #4
On 4/15/2025 6:31 PM, Baoquan He wrote:
> On 04/15/25 at 12:52pm, Shivank Garg wrote:
>> On 4/15/2025 8:09 AM, Baoquan He wrote:
>>> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
>>> indicate if the allocation is successful or failed. That is not very clear.
>>>
>>> Here change to return explicit error values and check them to judge if
>>> allocation is successful.
>>>
>>> IS_ERR_VALUE already uses unlikely() internally
>>>
>>> Signed-off-by: Baoquan He <bhe@redhat.com>
>>> ---
>>>  mm/vmalloc.c | 34 +++++++++++++++++-----------------
>>>  1 file changed, 17 insertions(+), 17 deletions(-)
>>>
>>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
>>> index 3f38a232663b..5b21cd09b2b4 100644
>>> --- a/mm/vmalloc.c
>>> +++ b/mm/vmalloc.c
>>> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>>>  			 */
>>>  			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
>>>  			if (!lva)
>>> -				return -1;
>>> +				return -ENOMEM;
>>>  		}
>>>  
>>>  		/*
>>> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
>>>  		 */
>>>  		va->va_start = nva_start_addr + size;
>>>  	} else {
>>> -		return -1;
>>> +		return -EINVAL;
>>>  	}
> 
> Thanks for reviewing.
> 
>>
>> Braces around return -EINVAL seem unnecessary.
>> They can be dropped.
> 
> This complies with the coding style required in 3) Placing Braces and
> Spaces of Documentation/process/coding-style.rst because the other
> branches contain multiple statements.
> 
>>
>>>  
>>>  	if (type != FL_FIT_TYPE) {
>>> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
>>>  
>>>  	/* Check the "vend" restriction. */
>>>  	if (nva_start_addr + size > vend)
>>> -		return vend;
>>> +		return -ERANGE;
>>>  
>>>  	/* Update the free vmap_area. */
>>>  	ret = va_clip(root, head, va, nva_start_addr, size);
>>> -	if (WARN_ON_ONCE(ret))
>>> -		return vend;
>>> +	if (ret)
>>> +		return ret;
>>
>> Is it safe to remove the warning, or was it critical for debugging?
> 
> This comes from a reported concern because va_clip() could fail due to
> NOTHING_FIT or a kmem_cache_alloc failure. The warning here could cause
> confusion, misleading people into thinking vmap area management has failed.
> 
>>
>>>  
>>>  	return nva_start_addr;
>>>  }
>>>  
>>>  /*
>>>   * Returns a start address of the newly allocated area, if success.
>>> - * Otherwise a vend is returned that indicates failure.
>>> + * Otherwise an error value is returned that indicates failure.
>>>   */
>>>  static __always_inline unsigned long
>>>  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>>> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>>>  
>>>  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
>>>  	if (unlikely(!va))
>>> -		return vend;
>>> +		return -ENOENT;
>>>  
>>>  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
>>> -	if (nva_start_addr == vend)
>>> -		return vend;
>>>  
>>>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
>>> -	find_vmap_lowest_match_check(root, head, size, align);
>>> +	if (!IS_ERR_VALUE(nva_start_addr))
>>> +		find_vmap_lowest_match_check(root, head, size, align);
>>>  #endif
>>>  
>>>  	return nva_start_addr;
>>> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
>>>  	struct vmap_area *va;
>>>  
>>>  	*vn_id = 0;
>>> -	*addr = vend;
>>> +	*addr = -EINVAL;
>>>  
>>>  	/*
>>>  	 * Fallback to a global heap if not vmalloc or there
>>> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>>>  	}
>>>  
>>>  retry:
>>> -	if (addr == vend) {
>>> +	if (IS_ERR_VALUE(addr)) {
>>>  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
>>>  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
>>>  			size, align, vstart, vend);
>>>  		spin_unlock(&free_vmap_area_lock);
>>>  	}
>>>  
>>> -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
>>> +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
>>>  
>>>  	/*
>>> -	 * If an allocation fails, the "vend" address is
>>> +	 * If an allocation fails, the error value is
>>>  	 * returned. Therefore trigger the overflow path.
>>>  	 */
>>> -	if (unlikely(addr == vend))
>>> +	if (IS_ERR_VALUE(addr))
>>>  		goto overflow;
>>>  
>>>  	va->va_start = addr;
>>> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>>>  
>>>  		ret = va_clip(&free_vmap_area_root,
>>>  			&free_vmap_area_list, va, start, size);
>>> -		if (WARN_ON_ONCE(unlikely(ret)))
>>> -			/* It is a BUG(), but trigger recovery instead. */
>>> +		if ((unlikely(ret))) {
>> 		    ^^		   ^^
>> The extra parentheses are redundant and can be removed for clarity.
> 
> You are right, I will remove it. Thanks.
> 

Please feel free to add following in next version.

Reviewed-by: Shivank Garg <shivankg@amd.com>
Tested-by: Shivank Garg <shivankg@amd.com>

Thanks,
Shivank

>>
>>> +			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
>>>  			goto recovery;
>>> +		}
>>>  
>>>  		/* Allocated area. */
>>>  		va = vas[area];
>>
>
Baoquan He April 15, 2025, 10:57 p.m. UTC | #5
On 04/16/25 at 12:30am, Shivank Garg wrote:
> 
> 
> On 4/15/2025 6:31 PM, Baoquan He wrote:
> > On 04/15/25 at 12:52pm, Shivank Garg wrote:
> >> On 4/15/2025 8:09 AM, Baoquan He wrote:
> >>> In codes of alloc_vmap_area(), it returns the upper bound 'vend' to
> >>> indicate if the allocation is successful or failed. That is not very clear.
> >>>
> >>> Here change to return explicit error values and check them to judge if
> >>> allocation is successful.
> >>>
> >>> IS_ERR_VALUE already uses unlikely() internally
> >>>
> >>> Signed-off-by: Baoquan He <bhe@redhat.com>
> >>> ---
> >>>  mm/vmalloc.c | 34 +++++++++++++++++-----------------
> >>>  1 file changed, 17 insertions(+), 17 deletions(-)
> >>>
> >>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> >>> index 3f38a232663b..5b21cd09b2b4 100644
> >>> --- a/mm/vmalloc.c
> >>> +++ b/mm/vmalloc.c
> >>> @@ -1715,7 +1715,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> >>>  			 */
> >>>  			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
> >>>  			if (!lva)
> >>> -				return -1;
> >>> +				return -ENOMEM;
> >>>  		}
> >>>  
> >>>  		/*
> >>> @@ -1729,7 +1729,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> >>>  		 */
> >>>  		va->va_start = nva_start_addr + size;
> >>>  	} else {
> >>> -		return -1;
> >>> +		return -EINVAL;
> >>>  	}
> > 
> > Thanks for reviewing.
> > 
> >>
> >> Braces around return -EINVAL seem unnecessary.
> >> They can be dropped.
> > 
> > This complies with the coding style required in 3) Placing Braces and
> > Spaces of Documentation/process/coding-style.rst because the other
> > branches contain multiple statements.
> > 
> >>
> >>>  
> >>>  	if (type != FL_FIT_TYPE) {
> >>> @@ -1758,19 +1758,19 @@ va_alloc(struct vmap_area *va,
> >>>  
> >>>  	/* Check the "vend" restriction. */
> >>>  	if (nva_start_addr + size > vend)
> >>> -		return vend;
> >>> +		return -ERANGE;
> >>>  
> >>>  	/* Update the free vmap_area. */
> >>>  	ret = va_clip(root, head, va, nva_start_addr, size);
> >>> -	if (WARN_ON_ONCE(ret))
> >>> -		return vend;
> >>> +	if (ret)
> >>> +		return ret;
> >>
> >> Is it safe to remove the warning, or was it critical for debugging?
> > 
> > This comes from a reported concern because va_clip() could fail due to
> > NOTHING_FIT or a kmem_cache_alloc failure. The warning here could cause
> > confusion, misleading people into thinking vmap area management has failed.
> > 
> >>
> >>>  
> >>>  	return nva_start_addr;
> >>>  }
> >>>  
> >>>  /*
> >>>   * Returns a start address of the newly allocated area, if success.
> >>> - * Otherwise a vend is returned that indicates failure.
> >>> + * Otherwise an error value is returned that indicates failure.
> >>>   */
> >>>  static __always_inline unsigned long
> >>>  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >>> @@ -1795,14 +1795,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> >>>  
> >>>  	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
> >>>  	if (unlikely(!va))
> >>> -		return vend;
> >>> +		return -ENOENT;
> >>>  
> >>>  	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> >>> -	if (nva_start_addr == vend)
> >>> -		return vend;
> >>>  
> >>>  #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> >>> -	find_vmap_lowest_match_check(root, head, size, align);
> >>> +	if (!IS_ERR_VALUE(nva_start_addr))
> >>> +		find_vmap_lowest_match_check(root, head, size, align);
> >>>  #endif
> >>>  
> >>>  	return nva_start_addr;
> >>> @@ -1932,7 +1931,7 @@ node_alloc(unsigned long size, unsigned long align,
> >>>  	struct vmap_area *va;
> >>>  
> >>>  	*vn_id = 0;
> >>> -	*addr = vend;
> >>> +	*addr = -EINVAL;
> >>>  
> >>>  	/*
> >>>  	 * Fallback to a global heap if not vmalloc or there
> >>> @@ -2012,20 +2011,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> >>>  	}
> >>>  
> >>>  retry:
> >>> -	if (addr == vend) {
> >>> +	if (IS_ERR_VALUE(addr)) {
> >>>  		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
> >>>  		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> >>>  			size, align, vstart, vend);
> >>>  		spin_unlock(&free_vmap_area_lock);
> >>>  	}
> >>>  
> >>> -	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> >>> +	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
> >>>  
> >>>  	/*
> >>> -	 * If an allocation fails, the "vend" address is
> >>> +	 * If an allocation fails, the error value is
> >>>  	 * returned. Therefore trigger the overflow path.
> >>>  	 */
> >>> -	if (unlikely(addr == vend))
> >>> +	if (IS_ERR_VALUE(addr))
> >>>  		goto overflow;
> >>>  
> >>>  	va->va_start = addr;
> >>> @@ -4753,9 +4752,10 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> >>>  
> >>>  		ret = va_clip(&free_vmap_area_root,
> >>>  			&free_vmap_area_list, va, start, size);
> >>> -		if (WARN_ON_ONCE(unlikely(ret)))
> >>> -			/* It is a BUG(), but trigger recovery instead. */
> >>> +		if ((unlikely(ret))) {
> >> 		    ^^		   ^^
> >> The extra parentheses are redundant and can be removed for clarity.
> > 
> > You are right, I will remove it. Thanks.
> > 
> 
> Please feel free to add following in next version.
> 
> Reviewed-by: Shivank Garg <shivankg@amd.com>
> Tested-by: Shivank Garg <shivankg@amd.com>

Thanks a lot for your careful reviewing and testing.

> 
> >>
> >>> +			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
> >>>  			goto recovery;
> >>> +		}
> >>>  
> >>>  		/* Allocated area. */
> >>>  		va = vas[area];
> >>
> > 
>
diff mbox series

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3f38a232663b..5b21cd09b2b4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1715,7 +1715,7 @@  va_clip(struct rb_root *root, struct list_head *head,
 			 */
 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
 			if (!lva)
-				return -1;
+				return -ENOMEM;
 		}
 
 		/*
@@ -1729,7 +1729,7 @@  va_clip(struct rb_root *root, struct list_head *head,
 		 */
 		va->va_start = nva_start_addr + size;
 	} else {
-		return -1;
+		return -EINVAL;
 	}
 
 	if (type != FL_FIT_TYPE) {
@@ -1758,19 +1758,19 @@  va_alloc(struct vmap_area *va,
 
 	/* Check the "vend" restriction. */
 	if (nva_start_addr + size > vend)
-		return vend;
+		return -ERANGE;
 
 	/* Update the free vmap_area. */
 	ret = va_clip(root, head, va, nva_start_addr, size);
-	if (WARN_ON_ONCE(ret))
-		return vend;
+	if (ret)
+		return ret;
 
 	return nva_start_addr;
 }
 
 /*
  * Returns a start address of the newly allocated area, if success.
- * Otherwise a vend is returned that indicates failure.
+ * Otherwise an error value is returned that indicates failure.
  */
 static __always_inline unsigned long
 __alloc_vmap_area(struct rb_root *root, struct list_head *head,
@@ -1795,14 +1795,13 @@  __alloc_vmap_area(struct rb_root *root, struct list_head *head,
 
 	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
 	if (unlikely(!va))
-		return vend;
+		return -ENOENT;
 
 	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
-	if (nva_start_addr == vend)
-		return vend;
 
 #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
-	find_vmap_lowest_match_check(root, head, size, align);
+	if (!IS_ERR_VALUE(nva_start_addr))
+		find_vmap_lowest_match_check(root, head, size, align);
 #endif
 
 	return nva_start_addr;
@@ -1932,7 +1931,7 @@  node_alloc(unsigned long size, unsigned long align,
 	struct vmap_area *va;
 
 	*vn_id = 0;
-	*addr = vend;
+	*addr = -EINVAL;
 
 	/*
 	 * Fallback to a global heap if not vmalloc or there
@@ -2012,20 +2011,20 @@  static struct vmap_area *alloc_vmap_area(unsigned long size,
 	}
 
 retry:
-	if (addr == vend) {
+	if (IS_ERR_VALUE(addr)) {
 		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
 		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
 			size, align, vstart, vend);
 		spin_unlock(&free_vmap_area_lock);
 	}
 
-	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
+	trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
 
 	/*
-	 * If an allocation fails, the "vend" address is
+	 * If an allocation fails, the error value is
 	 * returned. Therefore trigger the overflow path.
 	 */
-	if (unlikely(addr == vend))
+	if (IS_ERR_VALUE(addr))
 		goto overflow;
 
 	va->va_start = addr;
@@ -4753,9 +4752,10 @@  struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 
 		ret = va_clip(&free_vmap_area_root,
 			&free_vmap_area_list, va, start, size);
-		if (WARN_ON_ONCE(unlikely(ret)))
-			/* It is a BUG(), but trigger recovery instead. */
+		if ((unlikely(ret))) {
+			WARN_ONCE(1, "%s error: errno (%d)\n", __func__, ret);
 			goto recovery;
+		}
 
 		/* Allocated area. */
 		va = vas[area];