Message ID | 20250102065704.647693-1-guoweikang.kernel@gmail.com (mailing list archive)
---|---
State | New
Series | mm/memmap: Prevent double scanning of memmap by kmemleak
Hi Guo,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Guo-Weikang/mm-memmap-Prevent-double-scanning-of-memmap-by-kmemleak/20250102-145930
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250102065704.647693-1-guoweikang.kernel%40gmail.com
patch subject: [PATCH] mm/memmap: Prevent double scanning of memmap by kmemleak
config: x86_64-buildonly-randconfig-003-20250102 (https://download.01.org/0day-ci/archive/20250102/202501021601.jub4p3EM-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250102/202501021601.jub4p3EM-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501021601.jub4p3EM-lkp@intel.com/
All warnings (new ones prefixed by >>, old ones prefixed by <<):
>> WARNING: modpost: vmlinux: section mismatch in reference: vmemmap_alloc_block+0xfa (section: .text) -> memmap_alloc (section: .init.text)
On Thu, Jan 02, 2025 at 02:57:03PM +0800, Guo Weikang wrote:
> diff --git a/include/linux/memblock.h b/include/linux/memblock.h
> index 673d5cae7c81..b0483c534ef7 100644
> --- a/include/linux/memblock.h
> +++ b/include/linux/memblock.h
> @@ -375,7 +375,13 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
>  }
>  #endif /* CONFIG_NUMA */
>
> -/* Flags for memblock allocation APIs */
> +/*
> + * Flags for memblock allocation APIs
> + * MEMBLOCK_ALLOC_ANYWHERE and MEMBLOCK_ALLOC_ACCESSIBLE
> + * indicates wheather the allocation is limited by memblock.current_limit.
> + * MEMBLOCK_ALLOC_NOLEAKTRACE not only indicates that it does not need to
> + * be scanned by kmemleak, but also implies MEMBLOCK_ALLOC_ACCESSIBLE
> + */

I'd keep the comment short here, something like:

/*
 * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
 * MEMBLOCK_ALLOC_ACCESSIBLE.
 */

>  #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
>  #define MEMBLOCK_ALLOC_ACCESSIBLE	0
>  #define MEMBLOCK_ALLOC_NOLEAKTRACE	1
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index 24b68b425afb..71b58f5f2492 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -1580,6 +1580,10 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
>  	}
>  }
>
> +/*
> + * Kmemleak will explicitly scan mem_map by traversing all valid `struct *page`,
> + * so memblock does not need to be added to the scan list.
> + */
>  void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
>  			  phys_addr_t min_addr, int nid, bool exact_nid)
>  {
> @@ -1587,11 +1591,11 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
>
>  	if (exact_nid)
>  		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
> -						   MEMBLOCK_ALLOC_ACCESSIBLE,
> +						   MEMBLOCK_ALLOC_NOLEAKTRACE,
>  						   nid);
>  	else
>  		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
> -						 MEMBLOCK_ALLOC_ACCESSIBLE,
> +						 MEMBLOCK_ALLOC_NOLEAKTRACE,
>  						 nid);
>
>  	if (ptr && size > 0)
> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
> index cec67c5f37d8..b6ac9b1d4ff7 100644
> --- a/mm/sparse-vmemmap.c
> +++ b/mm/sparse-vmemmap.c
> @@ -27,25 +27,10 @@
>  #include <linux/spinlock.h>
>  #include <linux/vmalloc.h>
>  #include <linux/sched.h>
> -
> +#include "internal.h"
>  #include <asm/dma.h>
>  #include <asm/pgalloc.h>
>
> -/*
> - * Allocate a block of memory to be used to back the virtual memory map
> - * or to back the page tables that are used to create the mapping.
> - * Uses the main allocators if they are available, else bootmem.
> - */
> -
> -static void * __ref __earlyonly_bootmem_alloc(int node,
> -				unsigned long size,
> -				unsigned long align,
> -				unsigned long goal)
> -{
> -	return memblock_alloc_try_nid_raw(size, align, goal,
> -					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
> -}
> -
>  void * __meminit vmemmap_alloc_block(unsigned long size, int node)
>  {
>  	/* If the main allocator is up use that, fallback to bootmem. */
> @@ -66,8 +51,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
>  		}
>  		return NULL;
>  	} else
> -		return __earlyonly_bootmem_alloc(node, size, size,
> -				__pa(MAX_DMA_ADDRESS));
> +		return memmap_alloc(size, size, __pa(MAX_DMA_ADDRESS), node, false);
>  }

As the kernel test robot reported, the __ref annotation for
__earlyonly_bootmem_alloc() is still needed, otherwise you get a warning
that a __meminit function (vmemmap_alloc_block()) is calling an __init
one (memmap_alloc()). So I think it's better if you keep this function.
Maybe get it to call memmap_alloc() instead of
memblock_alloc_try_nid_raw().
Hi, Catalin

Catalin Marinas <catalin.marinas@arm.com> wrote on Friday, 3 January 2025 01:15:
>
> On Thu, Jan 02, 2025 at 02:57:03PM +0800, Guo Weikang wrote:
> > diff --git a/include/linux/memblock.h b/include/linux/memblock.h
> > index 673d5cae7c81..b0483c534ef7 100644
> > --- a/include/linux/memblock.h
> > +++ b/include/linux/memblock.h
> > @@ -375,7 +375,13 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
> >  }
> >  #endif /* CONFIG_NUMA */
> >
> > -/* Flags for memblock allocation APIs */
> > +/*
> > + * Flags for memblock allocation APIs
> > + * MEMBLOCK_ALLOC_ANYWHERE and MEMBLOCK_ALLOC_ACCESSIBLE
> > + * indicates wheather the allocation is limited by memblock.current_limit.
> > + * MEMBLOCK_ALLOC_NOLEAKTRACE not only indicates that it does not need to
> > + * be scanned by kmemleak, but also implies MEMBLOCK_ALLOC_ACCESSIBLE
> > + */
>
> I'd keep the comment short here, something like:
>
> /*
>  * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
>  * MEMBLOCK_ALLOC_ACCESSIBLE.
>  */
>

Thanks, the previous version would be a bit redundant.

> > -/*
> > - * Allocate a block of memory to be used to back the virtual memory map
> > - * or to back the page tables that are used to create the mapping.
> > - * Uses the main allocators if they are available, else bootmem.
> > - */
> > -
> > -static void * __ref __earlyonly_bootmem_alloc(int node,
> > -				unsigned long size,
> > -				unsigned long align,
> > -				unsigned long goal)
> > -{
> > -	return memblock_alloc_try_nid_raw(size, align, goal,
> > -					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
> > -}
> > -
> >  void * __meminit vmemmap_alloc_block(unsigned long size, int node)
> >  {
> >  	/* If the main allocator is up use that, fallback to bootmem. */
> > @@ -66,8 +51,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
> >  		}
> >  		return NULL;
> >  	} else
> > -		return __earlyonly_bootmem_alloc(node, size, size,
> > -				__pa(MAX_DMA_ADDRESS));
> > +		return memmap_alloc(size, size, __pa(MAX_DMA_ADDRESS), node, false);
> >  }
>
> As the kernel test robot reported, the __ref annotation for
> __earlyonly_bootmem_alloc() is still needed, otherwise you get a warning
> that a __meminit function (vmemmap_alloc_block()) is calling an __init
> one (memmap_alloc()). So I think it's better if you keep this function.
> Maybe get it to call memmap_alloc() instead of
> memblock_alloc_try_nid_raw().

I have also noticed and studied the previous implementation; it is clear
to me that the __ref annotation is there to prevent the init-section
mismatch warning. I will restore __earlyonly_bootmem_alloc().

> --
> Catalin

Thanks a lot

--
Guo
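For reference, the direction agreed above (keep the __ref wrapper and have it call memmap_alloc() instead of memblock_alloc_try_nid_raw()) could look roughly like the sketch below. This is an illustration based on the prototypes quoted in this thread, not the actual follow-up patch:

/*
 * Illustrative sketch only: keep the wrapper so the __meminit
 * vmemmap_alloc_block() never references the __init memmap_alloc()
 * directly; the __ref annotation tells modpost the cross-section
 * reference is intentional.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	/* memmap_alloc(size, align, min_addr, nid, exact_nid), per the diff above */
	return memmap_alloc(size, align, goal, node, false);
}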
Hi Guo,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Guo-Weikang/mm-memmap-Prevent-double-scanning-of-memmap-by-kmemleak/20250102-145930
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250102065704.647693-1-guoweikang.kernel%40gmail.com
patch subject: [PATCH] mm/memmap: Prevent double scanning of memmap by kmemleak
config: x86_64-randconfig-072-20250103 (https://download.01.org/0day-ci/archive/20250103/202501031757.LDH2bhm8-lkp@intel.com/config)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250103/202501031757.LDH2bhm8-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501031757.LDH2bhm8-lkp@intel.com/
All warnings (new ones prefixed by >>, old ones prefixed by <<):
>> WARNING: modpost: vmlinux: section mismatch in reference: vmemmap_alloc_block+0x8c (section: .text.vmemmap_alloc_block) -> memmap_alloc (section: .init.text)
WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/fpga/tests/fpga-mgr-test.o
WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/fpga/tests/fpga-bridge-test.o
WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/fpga/tests/fpga-region-test.o
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 673d5cae7c81..b0483c534ef7 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -375,7 +375,13 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 }
 #endif /* CONFIG_NUMA */
 
-/* Flags for memblock allocation APIs */
+/*
+ * Flags for memblock allocation APIs
+ * MEMBLOCK_ALLOC_ANYWHERE and MEMBLOCK_ALLOC_ACCESSIBLE
+ * indicates wheather the allocation is limited by memblock.current_limit.
+ * MEMBLOCK_ALLOC_NOLEAKTRACE not only indicates that it does not need to
+ * be scanned by kmemleak, but also implies MEMBLOCK_ALLOC_ACCESSIBLE
+ */
 #define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 #define MEMBLOCK_ALLOC_NOLEAKTRACE	1
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 24b68b425afb..71b58f5f2492 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1580,6 +1580,10 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
 	}
 }
 
+/*
+ * Kmemleak will explicitly scan mem_map by traversing all valid `struct *page`,
+ * so memblock does not need to be added to the scan list.
+ */
 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
 			  phys_addr_t min_addr, int nid, bool exact_nid)
 {
@@ -1587,11 +1591,11 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
 
 	if (exact_nid)
 		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
-						   MEMBLOCK_ALLOC_ACCESSIBLE,
+						   MEMBLOCK_ALLOC_NOLEAKTRACE,
 						   nid);
 	else
 		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
-						 MEMBLOCK_ALLOC_ACCESSIBLE,
+						 MEMBLOCK_ALLOC_NOLEAKTRACE,
 						 nid);
 
 	if (ptr && size > 0)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cec67c5f37d8..b6ac9b1d4ff7 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -27,25 +27,10 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
-
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 
-/*
- * Allocate a block of memory to be used to back the virtual memory map
- * or to back the page tables that are used to create the mapping.
- * Uses the main allocators if they are available, else bootmem.
- */
-
-static void * __ref __earlyonly_bootmem_alloc(int node,
-				unsigned long size,
-				unsigned long align,
-				unsigned long goal)
-{
-	return memblock_alloc_try_nid_raw(size, align, goal,
-					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
-}
-
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
 	/* If the main allocator is up use that, fallback to bootmem. */
@@ -66,8 +51,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 		}
 		return NULL;
 	} else
-		return __earlyonly_bootmem_alloc(node, size, size,
-				__pa(MAX_DMA_ADDRESS));
+		return memmap_alloc(size, size, __pa(MAX_DMA_ADDRESS), node, false);
 }
 
 static void * __meminit altmap_alloc_block_buf(unsigned long size,
kmemleak explicitly scans the mem_map through the valid struct page
objects. However, memmap_alloc() was also adding this memory to the gray
object list, causing it to be scanned twice. Remove the memory allocated
by memmap_alloc() from the kmemleak scan list and add a comment to
clarify the behavior.

Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/
Signed-off-by: Guo Weikang <guoweikang.kernel@gmail.com>
---
 include/linux/memblock.h |  8 +++++++-
 mm/mm_init.c             |  8 ++++++--
 mm/sparse-vmemmap.c      | 20 ++------------------
 3 files changed, 15 insertions(+), 21 deletions(-)
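For context, MEMBLOCK_ALLOC_NOLEAKTRACE avoids the double scan because memblock only registers an allocation with kmemleak when the caller did not request NOLEAKTRACE. A simplified paraphrase of that check in memblock's allocation path follows (mm/memblock.c; exact names and signatures may differ by kernel version, so treat this as a sketch, not verbatim kernel code):

/* Simplified paraphrase of memblock's allocation path, not verbatim kernel code. */
static phys_addr_t memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align,
					    phys_addr_t start, phys_addr_t end,
					    int nid, bool exact_nid)
{
	phys_addr_t found;

	/* ... find and reserve a suitable free range in 'found' ... */

	/*
	 * Only register the block as a kmemleak gray object when the caller
	 * did not pass MEMBLOCK_ALLOC_NOLEAKTRACE as the limit. With this
	 * patch, memmap_alloc() opts out, so mem_map is scanned once via its
	 * struct page objects rather than twice.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		kmemleak_alloc_phys(found, size, 0);

	return found;
}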