@@ -1100,6 +1100,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
+ int ret = 0;
do {
next = pmd_addr_end(addr, end);
@@ -1121,15 +1122,23 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
void *p = NULL;
p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
- if (!p)
- return -ENOMEM;
+ if (!p) {
+#ifdef CONFIG_MEMORY_HOTPLUG
+ vmemmap_free(start, end, altmap);
+#endif
+ ret = -ENOMEM;
+ break;
+ }
pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmdp, node, addr, next);
} while (addr = next, addr != end);
- return 0;
+ if (ret)
+ return vmemmap_populate_basepages(start, end, node, altmap);
+ else
+ return ret;
}
#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
When section mappings are enabled, we allocate vmemmap pages from physically contiguous memory of size PMD_SIZE using vmemmap_alloc_block_buf(). Section mappings are good to reduce TLB pressure. But when the system is highly fragmented and memory blocks are being hot-added at runtime, it's possible that such physically contiguous memory allocations can fail. Rather than failing the memory hot-add procedure, add a fallback option to allocate vmemmap pages from discontiguous pages using vmemmap_populate_basepages(). Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Logan Gunthorpe <logang@deltatee.com> Cc: David Hildenbrand <david@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Steven Price <steven.price@arm.com> --- arch/arm64/mm/mmu.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-)