[v6,30/37] mm: vmalloc: Enable memory allocation profiling

Message ID 20240321163705.3067592-31-surenb@google.com (mailing list archive)
State New
Series Memory allocation profiling

Commit Message

Suren Baghdasaryan March 21, 2024, 4:36 p.m. UTC
From: Kent Overstreet <kent.overstreet@linux.dev>

This wraps all external vmalloc allocation functions with the
alloc_hooks() wrapper, and switches internal allocations to _noprof
variants where appropriate, for the new memory allocation profiling
feature.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 drivers/staging/media/atomisp/pci/hmm/hmm.c |  2 +-
 include/linux/vmalloc.h                     | 60 ++++++++++----
 kernel/kallsyms_selftest.c                  |  2 +-
 mm/nommu.c                                  | 64 +++++++--------
 mm/util.c                                   | 24 +++---
 mm/vmalloc.c                                | 88 ++++++++++-----------
 6 files changed, 135 insertions(+), 105 deletions(-)
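
For readers skimming the diff, the shape of the change is uniform: every external
entry point keeps its familiar name, but the name becomes a macro that forwards to
the renamed _noprof function through alloc_hooks(), which charges the allocation to
the calling source line. A rough sketch of what a wrapped call expands to, assuming
the DEFINE_ALLOC_TAG()/alloc_tag_save()/alloc_tag_restore() helpers introduced
earlier in the series (an approximation, not the literal macro bodies):

    /*
     * Sketch only: approximately what p = vmalloc(size) becomes once
     * vmalloc() is a macro wrapping vmalloc_noprof() in alloc_hooks().
     * Helper names are assumptions taken from the alloc_tag patches in
     * this series, not quotes of the exact definitions.
     */
    p = ({
            DEFINE_ALLOC_TAG(_tag);             /* static tag keyed to this file:line */
            struct alloc_tag *_old = alloc_tag_save(&_tag);
            void *_res = vmalloc_noprof(size);  /* the real allocation */
            alloc_tag_restore(&_tag, _old);
            _res;
    });

(That renaming is presumably also what triggers the UML link failure reported in the
comments below: the plain symbol vmalloc no longer exists, so a translation unit that
references it without picking up the new header fails at link time.)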

Comments

SeongJae Park March 23, 2024, 6:05 p.m. UTC | #1
Hi Suren and Kent,

On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:

> From: Kent Overstreet <kent.overstreet@linux.dev>
> 
> This wraps all external vmalloc allocation functions with the
> alloc_hooks() wrapper, and switches internal allocations to _noprof
> variants where appropriate, for the new memory allocation profiling
> feature.

I just noticed latest mm-unstable fails running kunit on my machine as below.
'git-bisect' says this is the first commit of the failure.

    $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
    [10:59:53] Configuring KUnit Kernel ...
    [10:59:53] Building KUnit Kernel ...
    Populating config with:
    $ make ARCH=um O=../kunit.out/ olddefconfig
    Building with:
    $ make ARCH=um O=../kunit.out/ --jobs=36
    ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
    main.c:(.text+0x10b): undefined reference to `vmalloc'
    collect2: error: ld returned 1 exit status

Haven't looked into the code yet, but reporting first.  May I ask your idea?


Thanks,
SJ

> 
> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  drivers/staging/media/atomisp/pci/hmm/hmm.c |  2 +-
>  include/linux/vmalloc.h                     | 60 ++++++++++----
>  kernel/kallsyms_selftest.c                  |  2 +-
>  mm/nommu.c                                  | 64 +++++++--------
>  mm/util.c                                   | 24 +++---
>  mm/vmalloc.c                                | 88 ++++++++++-----------
>  6 files changed, 135 insertions(+), 105 deletions(-)
> 
> diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
> index bb12644fd033..3e2899ad8517 100644
> --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
> +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
> @@ -205,7 +205,7 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
>  	}
>  
>  	dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
> -		bo->start, bytes, type, vmalloc);
> +		bo->start, bytes, type, vmalloc_noprof);
>  
>  	return bo->start;
>  
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index 98ea90e90439..e4a631ec430b 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -2,6 +2,8 @@
>  #ifndef _LINUX_VMALLOC_H
>  #define _LINUX_VMALLOC_H
>  
> +#include <linux/alloc_tag.h>
> +#include <linux/sched.h>
>  #include <linux/spinlock.h>
>  #include <linux/init.h>
>  #include <linux/list.h>
> @@ -138,26 +140,54 @@ extern unsigned long vmalloc_nr_pages(void);
>  static inline unsigned long vmalloc_nr_pages(void) { return 0; }
>  #endif
>  
> -extern void *vmalloc(unsigned long size) __alloc_size(1);
> -extern void *vzalloc(unsigned long size) __alloc_size(1);
> -extern void *vmalloc_user(unsigned long size) __alloc_size(1);
> -extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
> -extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
> -extern void *vmalloc_32(unsigned long size) __alloc_size(1);
> -extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
> -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
> -extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
> +extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
> +#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))
> +
> +extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
> +#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))
> +
> +extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
> +#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
> +
> +extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
> +#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))
> +
> +extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
> +#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))
> +
> +extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
> +#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))
> +
> +extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
> +#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
> +
> +extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
> +#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
> +
> +extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
>  			unsigned long start, unsigned long end, gfp_t gfp_mask,
>  			pgprot_t prot, unsigned long vm_flags, int node,
>  			const void *caller) __alloc_size(1);
> -void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
> +#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
> +
> +void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
>  		int node, const void *caller) __alloc_size(1);
> -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
> +#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
> +
> +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
> +#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
> +
> +extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
> +#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
> +
> +extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
> +#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))
> +
> +extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
> +#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))
>  
> -extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
> -extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
> -extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
> -extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
> +extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
> +#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
>  
>  extern void vfree(const void *addr);
>  extern void vfree_atomic(const void *addr);
> diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c
> index 8a689b4ff4f9..2f84896a7bcb 100644
> --- a/kernel/kallsyms_selftest.c
> +++ b/kernel/kallsyms_selftest.c
> @@ -82,7 +82,7 @@ static struct test_item test_items[] = {
>  	ITEM_FUNC(kallsyms_test_func_static),
>  	ITEM_FUNC(kallsyms_test_func),
>  	ITEM_FUNC(kallsyms_test_func_weak),
> -	ITEM_FUNC(vmalloc),
> +	ITEM_FUNC(vmalloc_noprof),
>  	ITEM_FUNC(vfree),
>  #ifdef CONFIG_KALLSYMS_ALL
>  	ITEM_DATA(kallsyms_test_var_bss_static),
> diff --git a/mm/nommu.c b/mm/nommu.c
> index 5ec8f44e7ce9..69a6f3b4d156 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -137,28 +137,28 @@ void vfree(const void *addr)
>  }
>  EXPORT_SYMBOL(vfree);
>  
> -void *__vmalloc(unsigned long size, gfp_t gfp_mask)
> +void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
>  {
>  	/*
>  	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
>  	 * returns only a logical address.
>  	 */
> -	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
> +	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
>  }
> -EXPORT_SYMBOL(__vmalloc);
> +EXPORT_SYMBOL(__vmalloc_noprof);
>  
> -void *__vmalloc_node_range(unsigned long size, unsigned long align,
> +void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
>  		unsigned long start, unsigned long end, gfp_t gfp_mask,
>  		pgprot_t prot, unsigned long vm_flags, int node,
>  		const void *caller)
>  {
> -	return __vmalloc(size, gfp_mask);
> +	return __vmalloc_noprof(size, gfp_mask);
>  }
>  
> -void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
> +void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
>  		int node, const void *caller)
>  {
> -	return __vmalloc(size, gfp_mask);
> +	return __vmalloc_noprof(size, gfp_mask);
>  }
>  
>  static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
> @@ -179,11 +179,11 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
>  	return ret;
>  }
>  
> -void *vmalloc_user(unsigned long size)
> +void *vmalloc_user_noprof(unsigned long size)
>  {
>  	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
>  }
> -EXPORT_SYMBOL(vmalloc_user);
> +EXPORT_SYMBOL(vmalloc_user_noprof);
>  
>  struct page *vmalloc_to_page(const void *addr)
>  {
> @@ -217,13 +217,13 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
>   *	For tight control over page level allocator and protection flags
>   *	use __vmalloc() instead.
>   */
> -void *vmalloc(unsigned long size)
> +void *vmalloc_noprof(unsigned long size)
>  {
> -	return __vmalloc(size, GFP_KERNEL);
> +	return __vmalloc_noprof(size, GFP_KERNEL);
>  }
> -EXPORT_SYMBOL(vmalloc);
> +EXPORT_SYMBOL(vmalloc_noprof);
>  
> -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
> +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
>  
>  /*
>   *	vzalloc - allocate virtually contiguous memory with zero fill
> @@ -237,14 +237,14 @@ void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc)
>   *	For tight control over page level allocator and protection flags
>   *	use __vmalloc() instead.
>   */
> -void *vzalloc(unsigned long size)
> +void *vzalloc_noprof(unsigned long size)
>  {
> -	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
> +	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
>  }
> -EXPORT_SYMBOL(vzalloc);
> +EXPORT_SYMBOL(vzalloc_noprof);
>  
>  /**
> - * vmalloc_node - allocate memory on a specific node
> + * vmalloc_node_noprof - allocate memory on a specific node
>   * @size:	allocation size
>   * @node:	numa node
>   *
> @@ -254,14 +254,14 @@ EXPORT_SYMBOL(vzalloc);
>   * For tight control over page level allocator and protection flags
>   * use __vmalloc() instead.
>   */
> -void *vmalloc_node(unsigned long size, int node)
> +void *vmalloc_node_noprof(unsigned long size, int node)
>  {
> -	return vmalloc(size);
> +	return vmalloc_noprof(size);
>  }
> -EXPORT_SYMBOL(vmalloc_node);
> +EXPORT_SYMBOL(vmalloc_node_noprof);
>  
>  /**
> - * vzalloc_node - allocate memory on a specific node with zero fill
> + * vzalloc_node_noprof - allocate memory on a specific node with zero fill
>   * @size:	allocation size
>   * @node:	numa node
>   *
> @@ -272,27 +272,27 @@ EXPORT_SYMBOL(vmalloc_node);
>   * For tight control over page level allocator and protection flags
>   * use __vmalloc() instead.
>   */
> -void *vzalloc_node(unsigned long size, int node)
> +void *vzalloc_node_noprof(unsigned long size, int node)
>  {
> -	return vzalloc(size);
> +	return vzalloc_noprof(size);
>  }
> -EXPORT_SYMBOL(vzalloc_node);
> +EXPORT_SYMBOL(vzalloc_node_noprof);
>  
>  /**
> - * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
> + * vmalloc_32_noprof  -  allocate virtually contiguous memory (32bit addressable)
>   *	@size:		allocation size
>   *
>   *	Allocate enough 32bit PA addressable pages to cover @size from the
>   *	page level allocator and map them into contiguous kernel virtual space.
>   */
> -void *vmalloc_32(unsigned long size)
> +void *vmalloc_32_noprof(unsigned long size)
>  {
> -	return __vmalloc(size, GFP_KERNEL);
> +	return __vmalloc_noprof(size, GFP_KERNEL);
>  }
> -EXPORT_SYMBOL(vmalloc_32);
> +EXPORT_SYMBOL(vmalloc_32_noprof);
>  
>  /**
> - * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
> + * vmalloc_32_user_noprof - allocate zeroed virtually contiguous 32bit memory
>   *	@size:		allocation size
>   *
>   * The resulting memory area is 32bit addressable and zeroed so it can be
> @@ -301,15 +301,15 @@ EXPORT_SYMBOL(vmalloc_32);
>   * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
>   * remap_vmalloc_range() are permissible.
>   */
> -void *vmalloc_32_user(unsigned long size)
> +void *vmalloc_32_user_noprof(unsigned long size)
>  {
>  	/*
>  	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
>  	 * but for now this can simply use vmalloc_user() directly.
>  	 */
> -	return vmalloc_user(size);
> +	return vmalloc_user_noprof(size);
>  }
> -EXPORT_SYMBOL(vmalloc_32_user);
> +EXPORT_SYMBOL(vmalloc_32_user_noprof);
>  
>  void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
>  {
> diff --git a/mm/util.c b/mm/util.c
> index a79dce7546f1..157b5edcba75 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -656,7 +656,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
>  	 * about the resulting pointer, and cannot play
>  	 * protection games.
>  	 */
> -	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> +	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
>  			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
>  			node, __builtin_return_address(0));
>  }
> @@ -715,12 +715,12 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
>  EXPORT_SYMBOL(kvrealloc_noprof);
>  
>  /**
> - * __vmalloc_array - allocate memory for a virtually contiguous array.
> + * __vmalloc_array_noprof - allocate memory for a virtually contiguous array.
>   * @n: number of elements.
>   * @size: element size.
>   * @flags: the type of memory to allocate (see kmalloc).
>   */
> -void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
> +void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
>  {
>  	size_t bytes;
>  
> @@ -728,18 +728,18 @@ void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
>  		return NULL;
>  	return __vmalloc(bytes, flags);
>  }
> -EXPORT_SYMBOL(__vmalloc_array);
> +EXPORT_SYMBOL(__vmalloc_array_noprof);
>  
>  /**
> - * vmalloc_array - allocate memory for a virtually contiguous array.
> + * vmalloc_array_noprof - allocate memory for a virtually contiguous array.
>   * @n: number of elements.
>   * @size: element size.
>   */
> -void *vmalloc_array(size_t n, size_t size)
> +void *vmalloc_array_noprof(size_t n, size_t size)
>  {
>  	return __vmalloc_array(n, size, GFP_KERNEL);
>  }
> -EXPORT_SYMBOL(vmalloc_array);
> +EXPORT_SYMBOL(vmalloc_array_noprof);
>  
>  /**
>   * __vcalloc - allocate and zero memory for a virtually contiguous array.
> @@ -747,22 +747,22 @@ EXPORT_SYMBOL(vmalloc_array);
>   * @size: element size.
>   * @flags: the type of memory to allocate (see kmalloc).
>   */
> -void *__vcalloc(size_t n, size_t size, gfp_t flags)
> +void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
>  {
>  	return __vmalloc_array(n, size, flags | __GFP_ZERO);
>  }
> -EXPORT_SYMBOL(__vcalloc);
> +EXPORT_SYMBOL(__vcalloc_noprof);
>  
>  /**
> - * vcalloc - allocate and zero memory for a virtually contiguous array.
> + * vcalloc_noprof - allocate and zero memory for a virtually contiguous array.
>   * @n: number of elements.
>   * @size: element size.
>   */
> -void *vcalloc(size_t n, size_t size)
> +void *vcalloc_noprof(size_t n, size_t size)
>  {
>  	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
>  }
> -EXPORT_SYMBOL(vcalloc);
> +EXPORT_SYMBOL(vcalloc_noprof);
>  
>  struct anon_vma *folio_anon_vma(struct folio *folio)
>  {
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 22aa63f4ef63..b2f2248d85a9 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -3507,12 +3507,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
>  			 * but mempolicy wants to alloc memory by interleaving.
>  			 */
>  			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
> -				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
> +				nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
>  							nr_pages_request,
>  							pages + nr_allocated);
>  
>  			else
> -				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
> +				nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
>  							nr_pages_request,
>  							pages + nr_allocated);
>  
> @@ -3542,9 +3542,9 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
>  			break;
>  
>  		if (nid == NUMA_NO_NODE)
> -			page = alloc_pages(alloc_gfp, order);
> +			page = alloc_pages_noprof(alloc_gfp, order);
>  		else
> -			page = alloc_pages_node(nid, alloc_gfp, order);
> +			page = alloc_pages_node_noprof(nid, alloc_gfp, order);
>  		if (unlikely(!page)) {
>  			if (!nofail)
>  				break;
> @@ -3601,10 +3601,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
>  
>  	/* Please note that the recursion is strictly bounded. */
>  	if (array_size > PAGE_SIZE) {
> -		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
> +		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
>  					area->caller);
>  	} else {
> -		area->pages = kmalloc_node(array_size, nested_gfp, node);
> +		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
>  	}
>  
>  	if (!area->pages) {
> @@ -3687,7 +3687,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
>  }
>  
>  /**
> - * __vmalloc_node_range - allocate virtually contiguous memory
> + * __vmalloc_node_range_noprof - allocate virtually contiguous memory
>   * @size:		  allocation size
>   * @align:		  desired alignment
>   * @start:		  vm area range start
> @@ -3714,7 +3714,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
>   *
>   * Return: the address of the area or %NULL on failure
>   */
> -void *__vmalloc_node_range(unsigned long size, unsigned long align,
> +void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
>  			unsigned long start, unsigned long end, gfp_t gfp_mask,
>  			pgprot_t prot, unsigned long vm_flags, int node,
>  			const void *caller)
> @@ -3843,7 +3843,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
>  }
>  
>  /**
> - * __vmalloc_node - allocate virtually contiguous memory
> + * __vmalloc_node_noprof - allocate virtually contiguous memory
>   * @size:	    allocation size
>   * @align:	    desired alignment
>   * @gfp_mask:	    flags for the page level allocator
> @@ -3861,10 +3861,10 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *__vmalloc_node(unsigned long size, unsigned long align,
> +void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
>  			    gfp_t gfp_mask, int node, const void *caller)
>  {
> -	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
> +	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
>  				gfp_mask, PAGE_KERNEL, 0, node, caller);
>  }
>  /*
> @@ -3873,15 +3873,15 @@ void *__vmalloc_node(unsigned long size, unsigned long align,
>   * than that.
>   */
>  #ifdef CONFIG_TEST_VMALLOC_MODULE
> -EXPORT_SYMBOL_GPL(__vmalloc_node);
> +EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
>  #endif
>  
> -void *__vmalloc(unsigned long size, gfp_t gfp_mask)
> +void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
>  {
> -	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
> +	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
>  				__builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(__vmalloc);
> +EXPORT_SYMBOL(__vmalloc_noprof);
>  
>  /**
>   * vmalloc - allocate virtually contiguous memory
> @@ -3895,12 +3895,12 @@ EXPORT_SYMBOL(__vmalloc);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vmalloc(unsigned long size)
> +void *vmalloc_noprof(unsigned long size)
>  {
> -	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
> +	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
>  				__builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vmalloc);
> +EXPORT_SYMBOL(vmalloc_noprof);
>  
>  /**
>   * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
> @@ -3914,16 +3914,16 @@ EXPORT_SYMBOL(vmalloc);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
> +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
>  {
> -	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> +	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
>  				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
>  				    NUMA_NO_NODE, __builtin_return_address(0));
>  }
> -EXPORT_SYMBOL_GPL(vmalloc_huge);
> +EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
>  
>  /**
> - * vzalloc - allocate virtually contiguous memory with zero fill
> + * vzalloc_noprof - allocate virtually contiguous memory with zero fill
>   * @size:    allocation size
>   *
>   * Allocate enough pages to cover @size from the page level
> @@ -3935,12 +3935,12 @@ EXPORT_SYMBOL_GPL(vmalloc_huge);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vzalloc(unsigned long size)
> +void *vzalloc_noprof(unsigned long size)
>  {
> -	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
> +	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
>  				__builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vzalloc);
> +EXPORT_SYMBOL(vzalloc_noprof);
>  
>  /**
>   * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
> @@ -3951,17 +3951,17 @@ EXPORT_SYMBOL(vzalloc);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vmalloc_user(unsigned long size)
> +void *vmalloc_user_noprof(unsigned long size)
>  {
> -	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
> +	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
>  				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
>  				    VM_USERMAP, NUMA_NO_NODE,
>  				    __builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vmalloc_user);
> +EXPORT_SYMBOL(vmalloc_user_noprof);
>  
>  /**
> - * vmalloc_node - allocate memory on a specific node
> + * vmalloc_node_noprof - allocate memory on a specific node
>   * @size:	  allocation size
>   * @node:	  numa node
>   *
> @@ -3973,15 +3973,15 @@ EXPORT_SYMBOL(vmalloc_user);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vmalloc_node(unsigned long size, int node)
> +void *vmalloc_node_noprof(unsigned long size, int node)
>  {
> -	return __vmalloc_node(size, 1, GFP_KERNEL, node,
> +	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
>  			__builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vmalloc_node);
> +EXPORT_SYMBOL(vmalloc_node_noprof);
>  
>  /**
> - * vzalloc_node - allocate memory on a specific node with zero fill
> + * vzalloc_node_noprof - allocate memory on a specific node with zero fill
>   * @size:	allocation size
>   * @node:	numa node
>   *
> @@ -3991,12 +3991,12 @@ EXPORT_SYMBOL(vmalloc_node);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vzalloc_node(unsigned long size, int node)
> +void *vzalloc_node_noprof(unsigned long size, int node)
>  {
> -	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
> +	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
>  				__builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vzalloc_node);
> +EXPORT_SYMBOL(vzalloc_node_noprof);
>  
>  #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
>  #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
> @@ -4011,7 +4011,7 @@ EXPORT_SYMBOL(vzalloc_node);
>  #endif
>  
>  /**
> - * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
> + * vmalloc_32_noprof - allocate virtually contiguous memory (32bit addressable)
>   * @size:	allocation size
>   *
>   * Allocate enough 32bit PA addressable pages to cover @size from the
> @@ -4019,15 +4019,15 @@ EXPORT_SYMBOL(vzalloc_node);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vmalloc_32(unsigned long size)
> +void *vmalloc_32_noprof(unsigned long size)
>  {
> -	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
> +	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
>  			__builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vmalloc_32);
> +EXPORT_SYMBOL(vmalloc_32_noprof);
>  
>  /**
> - * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
> + * vmalloc_32_user_noprof - allocate zeroed virtually contiguous 32bit memory
>   * @size:	     allocation size
>   *
>   * The resulting memory area is 32bit addressable and zeroed so it can be
> @@ -4035,14 +4035,14 @@ EXPORT_SYMBOL(vmalloc_32);
>   *
>   * Return: pointer to the allocated memory or %NULL on error
>   */
> -void *vmalloc_32_user(unsigned long size)
> +void *vmalloc_32_user_noprof(unsigned long size)
>  {
> -	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
> +	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
>  				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
>  				    VM_USERMAP, NUMA_NO_NODE,
>  				    __builtin_return_address(0));
>  }
> -EXPORT_SYMBOL(vmalloc_32_user);
> +EXPORT_SYMBOL(vmalloc_32_user_noprof);
>  
>  /*
>   * Atomically zero bytes in the iterator.
> -- 
> 2.44.0.291.gc1ea87d7ee-goog
Suren Baghdasaryan March 25, 2024, 2:56 p.m. UTC | #2
On Sat, Mar 23, 2024 at 6:05 PM SeongJae Park <sj@kernel.org> wrote:
>
> Hi Suren and Kent,
>
> On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > From: Kent Overstreet <kent.overstreet@linux.dev>
> >
> > This wraps all external vmalloc allocation functions with the
> > alloc_hooks() wrapper, and switches internal allocations to _noprof
> > variants where appropriate, for the new memory allocation profiling
> > feature.
>
> I just noticed latest mm-unstable fails running kunit on my machine as below.
> 'git-bisect' says this is the first commit of the failure.
>
>     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
>     [10:59:53] Configuring KUnit Kernel ...
>     [10:59:53] Building KUnit Kernel ...
>     Populating config with:
>     $ make ARCH=um O=../kunit.out/ olddefconfig
>     Building with:
>     $ make ARCH=um O=../kunit.out/ --jobs=36
>     ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
>     main.c:(.text+0x10b): undefined reference to `vmalloc'
>     collect2: error: ld returned 1 exit status
>
> Haven't looked into the code yet, but reporting first.  May I ask your idea?

Hi SeongJae,
Looks like we missed adding "#include <linux/vmalloc.h>" inside
arch/um/os-Linux/main.c in this patch:
https://lore.kernel.org/all/20240321163705.3067592-2-surenb@google.com/.
I'll be posting fixes for all 0-day issues found over the weekend and
will include a fix for this. In the meantime, to work around it you
can add that include yourself. Please let me know if the issue still
persists after doing that.
Thanks,
Suren.

[...]
SeongJae Park March 25, 2024, 5:49 p.m. UTC | #3
On Mon, 25 Mar 2024 14:56:01 +0000 Suren Baghdasaryan <surenb@google.com> wrote:

> On Sat, Mar 23, 2024 at 6:05 PM SeongJae Park <sj@kernel.org> wrote:
> >
> > Hi Suren and Kent,
> >
> > On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > > From: Kent Overstreet <kent.overstreet@linux.dev>
> > >
> > > This wraps all external vmalloc allocation functions with the
> > > alloc_hooks() wrapper, and switches internal allocations to _noprof
> > > variants where appropriate, for the new memory allocation profiling
> > > feature.
> >
> > I just noticed latest mm-unstable fails running kunit on my machine as below.
> > 'git-bisect' says this is the first commit of the failure.
> >
> >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> >     [10:59:53] Configuring KUnit Kernel ...
> >     [10:59:53] Building KUnit Kernel ...
> >     Populating config with:
> >     $ make ARCH=um O=../kunit.out/ olddefconfig
> >     Building with:
> >     $ make ARCH=um O=../kunit.out/ --jobs=36
> >     ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
> >     main.c:(.text+0x10b): undefined reference to `vmalloc'
> >     collect2: error: ld returned 1 exit status
> >
> > Haven't looked into the code yet, but reporting first.  May I ask your idea?
> 
> Hi SeongJae,
> Looks like we missed adding "#include <linux/vmalloc.h>" inside
> arch/um/os-Linux/main.c in this patch:
> https://lore.kernel.org/all/20240321163705.3067592-2-surenb@google.com/.
> I'll be posing fixes for all 0-day issues found over the weekend and
> will include a fix for this. In the meantime, to work around it you
> can add that include yourself. Please let me know if the issue still
> persists after doing that.

Thank you, Suren.  The change made the error message disappear.  However, it
introduced another one.

    $ git diff
    diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
    index c8a42ecbd7a2..8fe274e9f3a4 100644
    --- a/arch/um/os-Linux/main.c
    +++ b/arch/um/os-Linux/main.c
    @@ -16,6 +16,7 @@
     #include <kern_util.h>
     #include <os.h>
     #include <um_malloc.h>
    +#include <linux/vmalloc.h>
    
     #define PGD_BOUND (4 * 1024 * 1024)
     #define STACKSIZE (8 * 1024 * 1024)
    $
    $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
    [10:43:13] Configuring KUnit Kernel ...
    [10:43:13] Building KUnit Kernel ...
    Populating config with:
    $ make ARCH=um O=../kunit.out/ olddefconfig
    Building with:
    $ make ARCH=um O=../kunit.out/ --jobs=36
    ERROR:root:In file included from .../arch/um/kernel/asm-offsets.c:1:
    .../arch/x86/um/shared/sysdep/kernel-offsets.h:9:6: warning: no previous prototype for ‘foo’ [-Wmissing-prototypes]
        9 | void foo(void)
          |      ^~~
    In file included from .../include/linux/alloc_tag.h:8,
                     from .../include/linux/vmalloc.h:5,
                     from .../arch/um/os-Linux/main.c:19:
    .../include/linux/bug.h:5:10: fatal error: asm/bug.h: No such file or directory
        5 | #include <asm/bug.h>
          |          ^~~~~~~~~~~
    compilation terminated.


Thanks,
SJ

[...]
Suren Baghdasaryan March 25, 2024, 5:59 p.m. UTC | #4
On Mon, Mar 25, 2024 at 10:49 AM SeongJae Park <sj@kernel.org> wrote:
>
> On Mon, 25 Mar 2024 14:56:01 +0000 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > On Sat, Mar 23, 2024 at 6:05 PM SeongJae Park <sj@kernel.org> wrote:
> > >
> > > Hi Suren and Kent,
> > >
> > > On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> > >
> > > > From: Kent Overstreet <kent.overstreet@linux.dev>
> > > >
> > > > This wrapps all external vmalloc allocation functions with the
> > > > alloc_hooks() wrapper, and switches internal allocations to _noprof
> > > > variants where appropriate, for the new memory allocation profiling
> > > > feature.
> > >
> > > I just noticed latest mm-unstable fails running kunit on my machine as below.
> > > 'git-bisect' says this is the first commit of the failure.
> > >
> > >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> > >     [10:59:53] Configuring KUnit Kernel ...
> > >     [10:59:53] Building KUnit Kernel ...
> > >     Populating config with:
> > >     $ make ARCH=um O=../kunit.out/ olddefconfig
> > >     Building with:
> > >     $ make ARCH=um O=../kunit.out/ --jobs=36
> > >     ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
> > >     main.c:(.text+0x10b): undefined reference to `vmalloc'
> > >     collect2: error: ld returned 1 exit status
> > >
> > > Haven't looked into the code yet, but reporting first.  May I ask your idea?
> >
> > Hi SeongJae,
> > Looks like we missed adding "#include <linux/vmalloc.h>" inside
> > arch/um/os-Linux/main.c in this patch:
> > https://lore.kernel.org/all/20240321163705.3067592-2-surenb@google.com/.
> > I'll be posing fixes for all 0-day issues found over the weekend and
> > will include a fix for this. In the meantime, to work around it you
> > can add that include yourself. Please let me know if the issue still
> > persists after doing that.
>
> Thank you, Suren.  The change made the error message disappears.  However, it
> introduced another one.

Ok, let me investigate and I'll try to get a fix for it this evening.
Thanks,
Suren.

>
>     $ git diff
>     diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
>     index c8a42ecbd7a2..8fe274e9f3a4 100644
>     --- a/arch/um/os-Linux/main.c
>     +++ b/arch/um/os-Linux/main.c
>     @@ -16,6 +16,7 @@
>      #include <kern_util.h>
>      #include <os.h>
>      #include <um_malloc.h>
>     +#include <linux/vmalloc.h>
>
>      #define PGD_BOUND (4 * 1024 * 1024)
>      #define STACKSIZE (8 * 1024 * 1024)
>     $
>     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
>     [10:43:13] Configuring KUnit Kernel ...
>     [10:43:13] Building KUnit Kernel ...
>     Populating config with:
>     $ make ARCH=um O=../kunit.out/ olddefconfig
>     Building with:
>     $ make ARCH=um O=../kunit.out/ --jobs=36
>     ERROR:root:In file included from .../arch/um/kernel/asm-offsets.c:1:
>     .../arch/x86/um/shared/sysdep/kernel-offsets.h:9:6: warning: no previous prototype for ‘foo’ [-Wmissing-prototypes]
>         9 | void foo(void)
>           |      ^~~
>     In file included from .../include/linux/alloc_tag.h:8,
>                      from .../include/linux/vmalloc.h:5,
>                      from .../arch/um/os-Linux/main.c:19:
>     .../include/linux/bug.h:5:10: fatal error: asm/bug.h: No such file or directory
>         5 | #include <asm/bug.h>
>           |          ^~~~~~~~~~~
>     compilation terminated.
>
>
> Thanks,
> SJ
>
> [...]
SeongJae Park March 25, 2024, 6:20 p.m. UTC | #5
On Mon, 25 Mar 2024 10:59:01 -0700 Suren Baghdasaryan <surenb@google.com> wrote:

> On Mon, Mar 25, 2024 at 10:49 AM SeongJae Park <sj@kernel.org> wrote:
> >
> > On Mon, 25 Mar 2024 14:56:01 +0000 Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > > On Sat, Mar 23, 2024 at 6:05 PM SeongJae Park <sj@kernel.org> wrote:
> > > >
> > > > Hi Suren and Kent,
> > > >
> > > > On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> > > >
> > > > > From: Kent Overstreet <kent.overstreet@linux.dev>
> > > > >
> > > > > This wrapps all external vmalloc allocation functions with the
> > > > > alloc_hooks() wrapper, and switches internal allocations to _noprof
> > > > > variants where appropriate, for the new memory allocation profiling
> > > > > feature.
> > > >
> > > > I just noticed latest mm-unstable fails running kunit on my machine as below.
> > > > 'git-bisect' says this is the first commit of the failure.
> > > >
> > > >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> > > >     [10:59:53] Configuring KUnit Kernel ...
> > > >     [10:59:53] Building KUnit Kernel ...
> > > >     Populating config with:
> > > >     $ make ARCH=um O=../kunit.out/ olddefconfig
> > > >     Building with:
> > > >     $ make ARCH=um O=../kunit.out/ --jobs=36
> > > >     ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
> > > >     main.c:(.text+0x10b): undefined reference to `vmalloc'
> > > >     collect2: error: ld returned 1 exit status
> > > >
> > > > Haven't looked into the code yet, but reporting first.  May I ask your idea?
> > >
> > > Hi SeongJae,
> > > Looks like we missed adding "#include <linux/vmalloc.h>" inside
> > > arch/um/os-Linux/main.c in this patch:
> > > https://lore.kernel.org/all/20240321163705.3067592-2-surenb@google.com/.
> > > I'll be posing fixes for all 0-day issues found over the weekend and
> > > will include a fix for this. In the meantime, to work around it you
> > > can add that include yourself. Please let me know if the issue still
> > > persists after doing that.
> >
> > Thank you, Suren.  The change made the error message disappears.  However, it
> > introduced another one.
> 
> Ok, let me investigate and I'll try to get a fix for it today evening.

Thank you for this kind reply.  Nonetheless, this is not blocking any real
work on my side.  So, no rush.  Please take your time :)


Thanks,
SJ

> Thanks,
> Suren.
> 
> >
> >     $ git diff
> >     diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
> >     index c8a42ecbd7a2..8fe274e9f3a4 100644
> >     --- a/arch/um/os-Linux/main.c
> >     +++ b/arch/um/os-Linux/main.c
> >     @@ -16,6 +16,7 @@
> >      #include <kern_util.h>
> >      #include <os.h>
> >      #include <um_malloc.h>
> >     +#include <linux/vmalloc.h>
> >
> >      #define PGD_BOUND (4 * 1024 * 1024)
> >      #define STACKSIZE (8 * 1024 * 1024)
> >     $
> >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> >     [10:43:13] Configuring KUnit Kernel ...
> >     [10:43:13] Building KUnit Kernel ...
> >     Populating config with:
> >     $ make ARCH=um O=../kunit.out/ olddefconfig
> >     Building with:
> >     $ make ARCH=um O=../kunit.out/ --jobs=36
> >     ERROR:root:In file included from .../arch/um/kernel/asm-offsets.c:1:
> >     .../arch/x86/um/shared/sysdep/kernel-offsets.h:9:6: warning: no previous prototype for ‘foo’ [-Wmissing-prototypes]
> >         9 | void foo(void)
> >           |      ^~~
> >     In file included from .../include/linux/alloc_tag.h:8,
> >                      from .../include/linux/vmalloc.h:5,
> >                      from .../arch/um/os-Linux/main.c:19:
> >     .../include/linux/bug.h:5:10: fatal error: asm/bug.h: No such file or directory
> >         5 | #include <asm/bug.h>
> >           |          ^~~~~~~~~~~
> >     compilation terminated.
> >
> >
> > Thanks,
> > SJ
> >
> > [...]
>
Suren Baghdasaryan March 26, 2024, 7:51 a.m. UTC | #6
On Mon, Mar 25, 2024 at 11:20 AM SeongJae Park <sj@kernel.org> wrote:
>
> On Mon, 25 Mar 2024 10:59:01 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > On Mon, Mar 25, 2024 at 10:49 AM SeongJae Park <sj@kernel.org> wrote:
> > >
> > > On Mon, 25 Mar 2024 14:56:01 +0000 Suren Baghdasaryan <surenb@google.com> wrote:
> > >
> > > > On Sat, Mar 23, 2024 at 6:05 PM SeongJae Park <sj@kernel.org> wrote:
> > > > >
> > > > > Hi Suren and Kent,
> > > > >
> > > > > On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> > > > >
> > > > > > From: Kent Overstreet <kent.overstreet@linux.dev>
> > > > > >
> > > > > > This wrapps all external vmalloc allocation functions with the
> > > > > > alloc_hooks() wrapper, and switches internal allocations to _noprof
> > > > > > variants where appropriate, for the new memory allocation profiling
> > > > > > feature.
> > > > >
> > > > > I just noticed latest mm-unstable fails running kunit on my machine as below.
> > > > > 'git-bisect' says this is the first commit of the failure.
> > > > >
> > > > >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> > > > >     [10:59:53] Configuring KUnit Kernel ...
> > > > >     [10:59:53] Building KUnit Kernel ...
> > > > >     Populating config with:
> > > > >     $ make ARCH=um O=../kunit.out/ olddefconfig
> > > > >     Building with:
> > > > >     $ make ARCH=um O=../kunit.out/ --jobs=36
> > > > >     ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
> > > > >     main.c:(.text+0x10b): undefined reference to `vmalloc'
> > > > >     collect2: error: ld returned 1 exit status
> > > > >
> > > > > Haven't looked into the code yet, but reporting first.  May I ask your idea?
> > > >
> > > > Hi SeongJae,
> > > > Looks like we missed adding "#include <linux/vmalloc.h>" inside
> > > > arch/um/os-Linux/main.c in this patch:
> > > > https://lore.kernel.org/all/20240321163705.3067592-2-surenb@google.com/.
> > > > I'll be posing fixes for all 0-day issues found over the weekend and
> > > > will include a fix for this. In the meantime, to work around it you
> > > > can add that include yourself. Please let me know if the issue still
> > > > persists after doing that.
> > >
> > > Thank you, Suren.  The change made the error message disappears.  However, it
> > > introduced another one.
> >
> > Ok, let me investigate and I'll try to get a fix for it today evening.
>
> Thank you for this kind reply.  Nonetheless, this is not blocking some real
> thing from me.  So, no rush.  Plese take your time :)

I posted a fix here:
https://lore.kernel.org/all/20240326073750.726636-1-surenb@google.com/
Please let me know if this resolves the issue.
Thanks,
Suren.

>
>
> Thanks,
> SJ
>
> > Thanks,
> > Suren.
> >
> > >
> > >     $ git diff
> > >     diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
> > >     index c8a42ecbd7a2..8fe274e9f3a4 100644
> > >     --- a/arch/um/os-Linux/main.c
> > >     +++ b/arch/um/os-Linux/main.c
> > >     @@ -16,6 +16,7 @@
> > >      #include <kern_util.h>
> > >      #include <os.h>
> > >      #include <um_malloc.h>
> > >     +#include <linux/vmalloc.h>
> > >
> > >      #define PGD_BOUND (4 * 1024 * 1024)
> > >      #define STACKSIZE (8 * 1024 * 1024)
> > >     $
> > >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> > >     [10:43:13] Configuring KUnit Kernel ...
> > >     [10:43:13] Building KUnit Kernel ...
> > >     Populating config with:
> > >     $ make ARCH=um O=../kunit.out/ olddefconfig
> > >     Building with:
> > >     $ make ARCH=um O=../kunit.out/ --jobs=36
> > >     ERROR:root:In file included from .../arch/um/kernel/asm-offsets.c:1:
> > >     .../arch/x86/um/shared/sysdep/kernel-offsets.h:9:6: warning: no previous prototype for ‘foo’ [-Wmissing-prototypes]
> > >         9 | void foo(void)
> > >           |      ^~~
> > >     In file included from .../include/linux/alloc_tag.h:8,
> > >                      from .../include/linux/vmalloc.h:5,
> > >                      from .../arch/um/os-Linux/main.c:19:
> > >     .../include/linux/bug.h:5:10: fatal error: asm/bug.h: No such file or directory
> > >         5 | #include <asm/bug.h>
> > >           |          ^~~~~~~~~~~
> > >     compilation terminated.
> > >
> > >
> > > Thanks,
> > > SJ
> > >
> > > [...]
> >
>
SeongJae Park March 26, 2024, 3:39 p.m. UTC | #7
On Tue, 26 Mar 2024 00:51:21 -0700 Suren Baghdasaryan <surenb@google.com> wrote:

> On Mon, Mar 25, 2024 at 11:20 AM SeongJae Park <sj@kernel.org> wrote:
> >
> > On Mon, 25 Mar 2024 10:59:01 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > > On Mon, Mar 25, 2024 at 10:49 AM SeongJae Park <sj@kernel.org> wrote:
> > > >
> > > > On Mon, 25 Mar 2024 14:56:01 +0000 Suren Baghdasaryan <surenb@google.com> wrote:
> > > >
> > > > > On Sat, Mar 23, 2024 at 6:05 PM SeongJae Park <sj@kernel.org> wrote:
> > > > > >
> > > > > > Hi Suren and Kent,
> > > > > >
> > > > > > On Thu, 21 Mar 2024 09:36:52 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> > > > > >
> > > > > > > From: Kent Overstreet <kent.overstreet@linux.dev>
> > > > > > >
> > > > > > > This wrapps all external vmalloc allocation functions with the
> > > > > > > alloc_hooks() wrapper, and switches internal allocations to _noprof
> > > > > > > variants where appropriate, for the new memory allocation profiling
> > > > > > > feature.
> > > > > >
> > > > > > I just noticed latest mm-unstable fails running kunit on my machine as below.
> > > > > > 'git-bisect' says this is the first commit of the failure.
> > > > > >
> > > > > >     $ ./tools/testing/kunit/kunit.py run --build_dir ../kunit.out/
> > > > > >     [10:59:53] Configuring KUnit Kernel ...
> > > > > >     [10:59:53] Building KUnit Kernel ...
> > > > > >     Populating config with:
> > > > > >     $ make ARCH=um O=../kunit.out/ olddefconfig
> > > > > >     Building with:
> > > > > >     $ make ARCH=um O=../kunit.out/ --jobs=36
> > > > > >     ERROR:root:/usr/bin/ld: arch/um/os-Linux/main.o: in function `__wrap_malloc':
> > > > > >     main.c:(.text+0x10b): undefined reference to `vmalloc'
> > > > > >     collect2: error: ld returned 1 exit status
> > > > > >
> > > > > > Haven't looked into the code yet, but reporting first.  May I ask your idea?
> > > > >
> > > > > Hi SeongJae,
> > > > > Looks like we missed adding "#include <linux/vmalloc.h>" inside
> > > > > arch/um/os-Linux/main.c in this patch:
> > > > > https://lore.kernel.org/all/20240321163705.3067592-2-surenb@google.com/.
> > > > > I'll be posing fixes for all 0-day issues found over the weekend and
> > > > > will include a fix for this. In the meantime, to work around it you
> > > > > can add that include yourself. Please let me know if the issue still
> > > > > persists after doing that.
> > > >
> > > > Thank you, Suren.  The change made the error message disappears.  However, it
> > > > introduced another one.
> > >
> > > Ok, let me investigate and I'll try to get a fix for it today evening.
> >
> > Thank you for this kind reply.  Nonetheless, this is not blocking some real
> > thing from me.  So, no rush.  Plese take your time :)
> 
> I posted a fix here:
> https://lore.kernel.org/all/20240326073750.726636-1-surenb@google.com/
> Please let me know if this resolves the issue.

I confirmed it fixes the issue, and replied to the patch with my Tested-by:
tag.  Thank you for this kind fix, Suren.


Thanks,
SJ

[...]
Sourav Panda April 16, 2024, 7:27 p.m. UTC | #8
> -void *__vcalloc(size_t n, size_t size, gfp_t flags)
> +void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
>  {
>  	return __vmalloc_array(n, size, flags | __GFP_ZERO);
>  }
> -EXPORT_SYMBOL(__vcalloc);
> +EXPORT_SYMBOL(__vcalloc_noprof);

The __vmalloc_array call here should instead be __vmalloc_array_noprof, so
that the more specific tag (the external caller's) is the one recorded in
/proc/allocinfo.
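
For illustration, a minimal sketch of the substitution being suggested,
assuming (as my reading of the series goes) that alloc_hooks() creates the
accounting tag at the call site of the wrapper macro, so _noprof helpers
should chain to other _noprof helpers:

    /* Sketch only: mm/util.c with the suggested change applied. */
    void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
    {
            /*
             * Calling the _noprof variant keeps the allocation charged to
             * the tag of whoever invoked the __vcalloc() macro, rather than
             * to a generic tag at this line inside mm/util.c.
             */
            return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
    }
    EXPORT_SYMBOL(__vcalloc_noprof);

With that, a /proc/allocinfo entry for a __vcalloc() user would point at that
user's own file and line; the same reasoning would presumably apply to the
other mm/util.c wrappers below that still call the hooked macros
(vmalloc_array_noprof, vcalloc_noprof, __vmalloc_array_noprof).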
diff mbox series

Patch

diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index bb12644fd033..3e2899ad8517 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -205,7 +205,7 @@  static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
 	}
 
 	dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n",
-		bo->start, bytes, type, vmalloc);
+		bo->start, bytes, type, vmalloc_noprof);
 
 	return bo->start;
 
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 98ea90e90439..e4a631ec430b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -2,6 +2,8 @@ 
 #ifndef _LINUX_VMALLOC_H
 #define _LINUX_VMALLOC_H
 
+#include <linux/alloc_tag.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/list.h>
@@ -138,26 +140,54 @@  extern unsigned long vmalloc_nr_pages(void);
 static inline unsigned long vmalloc_nr_pages(void) { return 0; }
 #endif
 
-extern void *vmalloc(unsigned long size) __alloc_size(1);
-extern void *vzalloc(unsigned long size) __alloc_size(1);
-extern void *vmalloc_user(unsigned long size) __alloc_size(1);
-extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
-extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
-extern void *vmalloc_32(unsigned long size) __alloc_size(1);
-extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
-extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))
+
+extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
+#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))
+
+extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
+
+extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
+#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))
+
+extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
+#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))
+
+extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))
+
+extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
+#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller) __alloc_size(1);
-void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
+
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller) __alloc_size(1);
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
+
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
+
+extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
+
+extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
+#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))
+
+extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
+#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))
 
-extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
-extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
-extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
-extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
+extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
+#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c
index 8a689b4ff4f9..2f84896a7bcb 100644
--- a/kernel/kallsyms_selftest.c
+++ b/kernel/kallsyms_selftest.c
@@ -82,7 +82,7 @@  static struct test_item test_items[] = {
 	ITEM_FUNC(kallsyms_test_func_static),
 	ITEM_FUNC(kallsyms_test_func),
 	ITEM_FUNC(kallsyms_test_func_weak),
-	ITEM_FUNC(vmalloc),
+	ITEM_FUNC(vmalloc_noprof),
 	ITEM_FUNC(vfree),
 #ifdef CONFIG_KALLSYMS_ALL
 	ITEM_DATA(kallsyms_test_var_bss_static),
diff --git a/mm/nommu.c b/mm/nommu.c
index 5ec8f44e7ce9..69a6f3b4d156 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -137,28 +137,28 @@  void vfree(const void *addr)
 }
 EXPORT_SYMBOL(vfree);
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
 	/*
 	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 	 * returns only a logical address.
 	 */
-	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
+	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
-EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(__vmalloc_noprof);
 
-void *__vmalloc_node_range(unsigned long size, unsigned long align,
+void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 		unsigned long start, unsigned long end, gfp_t gfp_mask,
 		pgprot_t prot, unsigned long vm_flags, int node,
 		const void *caller)
 {
-	return __vmalloc(size, gfp_mask);
+	return __vmalloc_noprof(size, gfp_mask);
 }
 
-void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller)
 {
-	return __vmalloc(size, gfp_mask);
+	return __vmalloc_noprof(size, gfp_mask);
 }
 
 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
@@ -179,11 +179,11 @@  static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 	return ret;
 }
 
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_noprof(unsigned long size)
 {
 	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_noprof);
 
 struct page *vmalloc_to_page(const void *addr)
 {
@@ -217,13 +217,13 @@  long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-void *vmalloc(unsigned long size)
+void *vmalloc_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL);
+	return __vmalloc_noprof(size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vmalloc_noprof);
 
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
 
 /*
  *	vzalloc - allocate virtually contiguous memory with zero fill
@@ -237,14 +237,14 @@  void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc)
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
  */
-void *vzalloc(unsigned long size)
+void *vzalloc_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
+	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vzalloc);
+EXPORT_SYMBOL(vzalloc_noprof);
 
 /**
- * vmalloc_node - allocate memory on a specific node
+ * vmalloc_node_noprof - allocate memory on a specific node
  * @size:	allocation size
  * @node:	numa node
  *
@@ -254,14 +254,14 @@  EXPORT_SYMBOL(vzalloc);
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
  */
-void *vmalloc_node(unsigned long size, int node)
+void *vmalloc_node_noprof(unsigned long size, int node)
 {
-	return vmalloc(size);
+	return vmalloc_noprof(size);
 }
-EXPORT_SYMBOL(vmalloc_node);
+EXPORT_SYMBOL(vmalloc_node_noprof);
 
 /**
- * vzalloc_node - allocate memory on a specific node with zero fill
+ * vzalloc_node_noprof - allocate memory on a specific node with zero fill
  * @size:	allocation size
  * @node:	numa node
  *
@@ -272,27 +272,27 @@  EXPORT_SYMBOL(vmalloc_node);
  * For tight control over page level allocator and protection flags
  * use __vmalloc() instead.
  */
-void *vzalloc_node(unsigned long size, int node)
+void *vzalloc_node_noprof(unsigned long size, int node)
 {
-	return vzalloc(size);
+	return vzalloc_noprof(size);
 }
-EXPORT_SYMBOL(vzalloc_node);
+EXPORT_SYMBOL(vzalloc_node_noprof);
 
 /**
- * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
+ * vmalloc_32_noprof  -  allocate virtually contiguous memory (32bit addressable)
  *	@size:		allocation size
  *
  *	Allocate enough 32bit PA addressable pages to cover @size from the
  *	page level allocator and map them into contiguous kernel virtual space.
  */
-void *vmalloc_32(unsigned long size)
+void *vmalloc_32_noprof(unsigned long size)
 {
-	return __vmalloc(size, GFP_KERNEL);
+	return __vmalloc_noprof(size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmalloc_32_noprof);
 
 /**
- * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * vmalloc_32_user_noprof - allocate zeroed virtually contiguous 32bit memory
  *	@size:		allocation size
  *
  * The resulting memory area is 32bit addressable and zeroed so it can be
@@ -301,15 +301,15 @@  EXPORT_SYMBOL(vmalloc_32);
  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
  * remap_vmalloc_range() are permissible.
  */
-void *vmalloc_32_user(unsigned long size)
+void *vmalloc_32_user_noprof(unsigned long size)
 {
 	/*
 	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 	 * but for now this can simply use vmalloc_user() directly.
 	 */
-	return vmalloc_user(size);
+	return vmalloc_user_noprof(size);
 }
-EXPORT_SYMBOL(vmalloc_32_user);
+EXPORT_SYMBOL(vmalloc_32_user_noprof);
 
 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 {
diff --git a/mm/util.c b/mm/util.c
index a79dce7546f1..157b5edcba75 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -656,7 +656,7 @@  void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 	 * about the resulting pointer, and cannot play
 	 * protection games.
 	 */
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
@@ -715,12 +715,12 @@  void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag
 EXPORT_SYMBOL(kvrealloc_noprof);
 
 /**
- * __vmalloc_array - allocate memory for a virtually contiguous array.
+ * __vmalloc_array_noprof - allocate memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
+void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
@@ -728,18 +728,18 @@  void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
 		return NULL;
 	return __vmalloc(bytes, flags);
 }
-EXPORT_SYMBOL(__vmalloc_array);
+EXPORT_SYMBOL(__vmalloc_array_noprof);
 
 /**
- * vmalloc_array - allocate memory for a virtually contiguous array.
+ * vmalloc_array_noprof - allocate memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  */
-void *vmalloc_array(size_t n, size_t size)
+void *vmalloc_array_noprof(size_t n, size_t size)
 {
 	return __vmalloc_array(n, size, GFP_KERNEL);
 }
-EXPORT_SYMBOL(vmalloc_array);
+EXPORT_SYMBOL(vmalloc_array_noprof);
 
 /**
  * __vcalloc - allocate and zero memory for a virtually contiguous array.
@@ -747,22 +747,22 @@  EXPORT_SYMBOL(vmalloc_array);
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-void *__vcalloc(size_t n, size_t size, gfp_t flags)
+void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
 {
 	return __vmalloc_array(n, size, flags | __GFP_ZERO);
 }
-EXPORT_SYMBOL(__vcalloc);
+EXPORT_SYMBOL(__vcalloc_noprof);
 
 /**
- * vcalloc - allocate and zero memory for a virtually contiguous array.
+ * vcalloc_noprof - allocate and zero memory for a virtually contiguous array.
  * @n: number of elements.
  * @size: element size.
  */
-void *vcalloc(size_t n, size_t size)
+void *vcalloc_noprof(size_t n, size_t size)
 {
 	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
 }
-EXPORT_SYMBOL(vcalloc);
+EXPORT_SYMBOL(vcalloc_noprof);
 
 struct anon_vma *folio_anon_vma(struct folio *folio)
 {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 22aa63f4ef63..b2f2248d85a9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3507,12 +3507,12 @@  vm_area_alloc_pages(gfp_t gfp, int nid,
 			 * but mempolicy wants to alloc memory by interleaving.
 			 */
 			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
+				nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
 							nr_pages_request,
 							pages + nr_allocated);
 
 			else
-				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
+				nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
 							nr_pages_request,
 							pages + nr_allocated);
 
@@ -3542,9 +3542,9 @@  vm_area_alloc_pages(gfp_t gfp, int nid,
 			break;
 
 		if (nid == NUMA_NO_NODE)
-			page = alloc_pages(alloc_gfp, order);
+			page = alloc_pages_noprof(alloc_gfp, order);
 		else
-			page = alloc_pages_node(nid, alloc_gfp, order);
+			page = alloc_pages_node_noprof(nid, alloc_gfp, order);
 		if (unlikely(!page)) {
 			if (!nofail)
 				break;
@@ -3601,10 +3601,10 @@  static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+		area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
 					area->caller);
 	} else {
-		area->pages = kmalloc_node(array_size, nested_gfp, node);
+		area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
 	}
 
 	if (!area->pages) {
@@ -3687,7 +3687,7 @@  static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 }
 
 /**
- * __vmalloc_node_range - allocate virtually contiguous memory
+ * __vmalloc_node_range_noprof - allocate virtually contiguous memory
  * @size:		  allocation size
  * @align:		  desired alignment
  * @start:		  vm area range start
@@ -3714,7 +3714,7 @@  static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  *
  * Return: the address of the area or %NULL on failure
  */
-void *__vmalloc_node_range(unsigned long size, unsigned long align,
+void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller)
@@ -3843,7 +3843,7 @@  void *__vmalloc_node_range(unsigned long size, unsigned long align,
 }
 
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_noprof - allocate virtually contiguous memory
  * @size:	    allocation size
  * @align:	    desired alignment
  * @gfp_mask:	    flags for the page level allocator
@@ -3861,10 +3861,10 @@  void *__vmalloc_node_range(unsigned long size, unsigned long align,
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *__vmalloc_node(unsigned long size, unsigned long align,
+void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, int node, const void *caller)
 {
-	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
 				gfp_mask, PAGE_KERNEL, 0, node, caller);
 }
 /*
@@ -3873,15 +3873,15 @@  void *__vmalloc_node(unsigned long size, unsigned long align,
  * than that.
  */
 #ifdef CONFIG_TEST_VMALLOC_MODULE
-EXPORT_SYMBOL_GPL(__vmalloc_node);
+EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
 #endif
 
-void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(__vmalloc);
+EXPORT_SYMBOL(__vmalloc_noprof);
 
 /**
  * vmalloc - allocate virtually contiguous memory
@@ -3895,12 +3895,12 @@  EXPORT_SYMBOL(__vmalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc(unsigned long size)
+void *vmalloc_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vmalloc_noprof);
 
 /**
  * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
@@ -3914,16 +3914,16 @@  EXPORT_SYMBOL(vmalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
+void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
 {
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
 				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 				    NUMA_NO_NODE, __builtin_return_address(0));
 }
-EXPORT_SYMBOL_GPL(vmalloc_huge);
+EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
 
 /**
- * vzalloc - allocate virtually contiguous memory with zero fill
+ * vzalloc_noprof - allocate virtually contiguous memory with zero fill
  * @size:    allocation size
  *
  * Allocate enough pages to cover @size from the page level
@@ -3935,12 +3935,12 @@  EXPORT_SYMBOL_GPL(vmalloc_huge);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vzalloc(unsigned long size)
+void *vzalloc_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vzalloc);
+EXPORT_SYMBOL(vzalloc_noprof);
 
 /**
  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
@@ -3951,17 +3951,17 @@  EXPORT_SYMBOL(vzalloc);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_user(unsigned long size)
+void *vmalloc_user_noprof(unsigned long size)
 {
-	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
 				    VM_USERMAP, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_user);
+EXPORT_SYMBOL(vmalloc_user_noprof);
 
 /**
- * vmalloc_node - allocate memory on a specific node
+ * vmalloc_node_noprof - allocate memory on a specific node
  * @size:	  allocation size
  * @node:	  numa node
  *
@@ -3973,15 +3973,15 @@  EXPORT_SYMBOL(vmalloc_user);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_node(unsigned long size, int node)
+void *vmalloc_node_noprof(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL, node,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_node);
+EXPORT_SYMBOL(vmalloc_node_noprof);
 
 /**
- * vzalloc_node - allocate memory on a specific node with zero fill
+ * vzalloc_node_noprof - allocate memory on a specific node with zero fill
  * @size:	allocation size
  * @node:	numa node
  *
@@ -3991,12 +3991,12 @@  EXPORT_SYMBOL(vmalloc_node);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vzalloc_node(unsigned long size, int node)
+void *vzalloc_node_noprof(unsigned long size, int node)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
+	return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
 				__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vzalloc_node);
+EXPORT_SYMBOL(vzalloc_node_noprof);
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
@@ -4011,7 +4011,7 @@  EXPORT_SYMBOL(vzalloc_node);
 #endif
 
 /**
- * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
+ * vmalloc_32_noprof - allocate virtually contiguous memory (32bit addressable)
  * @size:	allocation size
  *
  * Allocate enough 32bit PA addressable pages to cover @size from the
@@ -4019,15 +4019,15 @@  EXPORT_SYMBOL(vzalloc_node);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_32(unsigned long size)
+void *vmalloc_32_noprof(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
+	return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
 			__builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmalloc_32_noprof);
 
 /**
- * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
+ * vmalloc_32_user_noprof - allocate zeroed virtually contiguous 32bit memory
  * @size:	     allocation size
  *
  * The resulting memory area is 32bit addressable and zeroed so it can be
@@ -4035,14 +4035,14 @@  EXPORT_SYMBOL(vmalloc_32);
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_32_user(unsigned long size)
+void *vmalloc_32_user_noprof(unsigned long size)
 {
-	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range_noprof(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
 				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
 				    VM_USERMAP, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_32_user);
+EXPORT_SYMBOL(vmalloc_32_user_noprof);
 
 /*
  * Atomically zero bytes in the iterator.