
[1/4] mm/vmap: remove "node" argument

Message ID 20190522150939.24605-1-urezki@gmail.com (mailing list archive)
State New, archived
Series [1/4] mm/vmap: remove "node" argument

Commit Message

Uladzislau Rezki May 22, 2019, 3:09 p.m. UTC
Remove unused argument from the __alloc_vmap_area() function.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 mm/vmalloc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

Comments

Hillf Danton May 24, 2019, 10:33 a.m. UTC | #1
On Wed, 22 May 2019 17:09:37 +0200 Uladzislau Rezki (Sony) wrote:
>  /*
> + * Preload this CPU with one extra vmap_area object to ensure
> + * that we have it available when the fit type of a free area
> + * is NE_FIT_TYPE.
> + *
> + * The preload is done in a non-atomic context, thus it allows us
> + * to use more permissive allocation masks and therefore to be
> + * more stable under low memory conditions and high memory
> + * pressure.
> + *
> + * On success, *preloaded is set to 1 and preemption is left
> + * disabled. On allocation failure, *preloaded is set to 0 and
> + * preemption is enabled again. It has to be paired with
> + * ne_fit_preload_end().
> + */
> +static void
> +ne_fit_preload(int *preloaded)
> +{
> +	preempt_disable();
> +
> +	if (!__this_cpu_read(ne_fit_preload_node)) {
> +		struct vmap_area *node;
> +
> +		preempt_enable();
> +		node = kmem_cache_alloc(vmap_area_cachep, GFP_KERNEL);

Alternatively, can you please take another look at the upside of using
the memory node parameter of alloc_vmap_area() for allocating the va
slab object, given that this preload, unlike adjust_va_to_fit_type(),
is invoked with the vmap_area_lock not acquired?
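
A minimal sketch of what this could look like, assuming ne_fit_preload()
gains a NUMA node id and is handed the "node" that alloc_vmap_area()
already receives (illustrative only, not part of the posted series):

static void
ne_fit_preload(int *preloaded, int nid)
{
	preempt_disable();

	if (!__this_cpu_read(ne_fit_preload_node)) {
		struct vmap_area *node;

		/*
		 * Sketch only: GFP_KERNEL may sleep, so drop preemption.
		 * The vmap_area_lock is not held here, which is what makes
		 * a node-aware slab allocation possible at this point.
		 */
		preempt_enable();
		node = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, nid);
		if (!node) {
			*preloaded = 0;
			return;
		}

		preempt_disable();

		/* Another task preloaded this CPU meanwhile; free the spare. */
		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, node))
			kmem_cache_free(vmap_area_cachep, node);
	}

	*preloaded = 1;
}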

> +		if (node == NULL) {
> +			*preloaded = 0;
> +			return;
> +		}
> +
> +		preempt_disable();
> +
> +		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, node))
> +			kmem_cache_free(vmap_area_cachep, node);
> +	}
> +
> +	*preloaded = 1;
> +}
> +
> +static void
> +ne_fit_preload_end(int preloaded)
> +{
> +	if (preloaded)
> +		preempt_enable();
> +}
> +
> +/*
>   * Allocate a region of KVA of the specified size and alignment, within the
>   * vstart and vend.
>   */
> @@ -1034,6 +1100,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	struct vmap_area *va;
>  	unsigned long addr;
>  	int purged = 0;
> +	int preloaded;
>  
>  	BUG_ON(!size);
>  	BUG_ON(offset_in_page(size));
> @@ -1056,6 +1123,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
>  
>  retry:
> +	/*
> +	 * Even if it fails we do not really care about that.
> +	 * Just proceed as it is. The "overflow" path will refill
> +	 * the cache we allocate from.
> +	 */
> +	ne_fit_preload(&preloaded);
>  	spin_lock(&vmap_area_lock);
>  
>  	/*
> @@ -1063,6 +1136,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	 * returned. Therefore trigger the overflow path.
>  	 */
>  	addr = __alloc_vmap_area(size, align, vstart, vend);
> +	ne_fit_preload_end(preloaded);
> +
>  	if (unlikely(addr == vend))
>  		goto overflow;
>  
> -- 
> 2.11.0
>  
Best Regards
Hillf
Uladzislau Rezki May 24, 2019, 2:14 p.m. UTC | #2
On Fri, May 24, 2019 at 06:33:16PM +0800, Hillf Danton wrote:
> 
> On Wed, 22 May 2019 17:09:37 +0200 Uladzislau Rezki (Sony) wrote:
> >  /*
> > + * Preload this CPU with one extra vmap_area object to ensure
> > + * that we have it available when the fit type of a free area
> > + * is NE_FIT_TYPE.
> > + *
> > + * The preload is done in a non-atomic context, thus it allows us
> > + * to use more permissive allocation masks and therefore to be
> > + * more stable under low memory conditions and high memory
> > + * pressure.
> > + *
> > + * On success, *preloaded is set to 1 and preemption is left
> > + * disabled. On allocation failure, *preloaded is set to 0 and
> > + * preemption is enabled again. It has to be paired with
> > + * ne_fit_preload_end().
> > + */
> > +static void
> > +ne_fit_preload(int *preloaded)
> > +{
> > +	preempt_disable();
> > +
> > +	if (!__this_cpu_read(ne_fit_preload_node)) {
> > +		struct vmap_area *node;
> > +
> > +		preempt_enable();
> > +		node = kmem_cache_alloc(vmap_area_cachep, GFP_KERNEL);
> 
> Alternatively, can you please take another look at the upside of using
> the memory node parameter of alloc_vmap_area() for allocating the va
> slab object, given that this preload, unlike adjust_va_to_fit_type(),
> is invoked with the vmap_area_lock not acquired?
> 
Agree, that makes sense. I will upload a v2 that addresses all the comments.

Thank you!

--
Vlad Rezki
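
For reference, a rough sketch of the caller side with the node id threaded
through to the preload, as discussed above (an illustration of the agreed
direction, assuming the hypothetical ne_fit_preload(preloaded, nid) signature
sketched earlier; not the actual v2 patch):

retry:
	/*
	 * Preload one extra vmap_area object for the target node before
	 * taking vmap_area_lock, so that GFP_KERNEL can be used. Even if
	 * it fails we do not really care: the "overflow" path will refill
	 * the cache we allocate from.
	 */
	ne_fit_preload(&preloaded, node);
	spin_lock(&vmap_area_lock);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	ne_fit_preload_end(preloaded);

	if (unlikely(addr == vend))
		goto overflow;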

Patch

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c42872ed82ac..ea1b65fac599 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -985,7 +985,7 @@  adjust_va_to_fit_type(struct vmap_area *va,
  */
 static __always_inline unsigned long
 __alloc_vmap_area(unsigned long size, unsigned long align,
-	unsigned long vstart, unsigned long vend, int node)
+	unsigned long vstart, unsigned long vend)
 {
 	unsigned long nva_start_addr;
 	struct vmap_area *va;
@@ -1062,7 +1062,7 @@  static struct vmap_area *alloc_vmap_area(unsigned long size,
 	 * If an allocation fails, the "vend" address is
 	 * returned. Therefore trigger the overflow path.
 	 */
-	addr = __alloc_vmap_area(size, align, vstart, vend, node);
+	addr = __alloc_vmap_area(size, align, vstart, vend);
 	if (unlikely(addr == vend))
 		goto overflow;