@@ -415,7 +415,9 @@ config NUMA
depends on SMP && MMU
select ARCH_SUPPORTS_NUMA_BALANCING
select GENERIC_ARCH_NUMA
+ select HAVE_SETUP_PER_CPU_AREA
select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK
select OF_NUMA
select USE_PERCPU_NUMA_NODE_ID
help
@@ -438,6 +438,14 @@ static void __init kasan_shallow_populate(void *start, void *end)
kasan_shallow_populate_pgd(vaddr, vend);
}

+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+	kasan_populate(kasan_mem_to_shadow(start),
+		       kasan_mem_to_shadow(start + size));
+}
+#endif
+
static void __init create_tmp_mapping(void)
{
void *ptr;
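For context on the kasan_init.c hunk: the page first chunk allocator
registers its first-chunk vm area with vm_area_register_early(), which
populates the KASAN shadow for that area through the
kasan_populate_early_vm_area_shadow() hook. The generic definition of the
hook is an empty weak stub, so an architecture that enables KASAN_VMALLOC
together with the page allocator must override it, as done above;
otherwise the first access to dynamic percpu data would fault on
unpopulated shadow. A condensed sketch of the generic side follows; the
exact bodies vary across kernel versions, so treat it as illustrative
only:

/* mm/kasan/shadow.c: default no-op unless the arch overrides it. */
void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

/*
 * mm/vmalloc.c (simplified): pcpu_page_first_chunk() registers its
 * first-chunk vm area here, before the regular vmalloc machinery is
 * up, so the shadow for the area must be populated at the same time.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);

	/* Dispatches to the riscv override added by this patch. */
	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
}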
As explained in commit 6ea529a2037c ("percpu: make embedding first chunk
allocator check vmalloc space size"), the embedding first chunk allocator
needs the vmalloc space to be larger than the maximum distance between
units which are grouped into NUMA nodes.

In a very sparse NUMA configuration with a small vmalloc area (for
example, it is only 64GB with sv39), the allocation of dynamic percpu
data in the vmalloc area could fail.

So provide the pcpu page allocator as a fallback in case we fall into
such a sparse configuration, which happened on arm64 as shown by commit
09cea6195073 ("arm64: support page mapping percpu first chunk
allocator").

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
---
 arch/riscv/Kconfig         | 2 ++
 arch/riscv/mm/kasan_init.c | 8 ++++++++
 2 files changed, 10 insertions(+)
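For reviewers, a note on how the two new selects fit together:
HAVE_SETUP_PER_CPU_AREA makes riscv use the setup_per_cpu_areas()
provided by the generic NUMA code in drivers/base/arch_numa.c, and
NEED_PER_CPU_PAGE_FIRST_CHUNK builds the page allocator that it falls
back to. A condensed sketch of that function, with error paths and
config guards trimmed (the percpu first-chunk signatures have changed
across kernel versions, so this is illustrative rather than exact):

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		/*
		 * First choice: embed the first chunk, reserving room
		 * for module percpu variables. This is what needs the
		 * vmalloc space to cover the largest inter-node unit
		 * distance.
		 */
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE,
					    PAGE_SIZE, pcpu_cpu_distance,
					    early_cpu_to_node);
		if (rc < 0)
			pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}

	/*
	 * Fallback enabled by NEED_PER_CPU_PAGE_FIRST_CHUNK: map the
	 * first chunk page by page, so sparse NUMA distances no longer
	 * have to fit inside the (64GB on sv39) vmalloc area.
	 */
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   early_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas (err=%d).", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}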