@@ -5971,6 +5971,20 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
if (start_pfn == altmap->base_pfn)
start_pfn += altmap->reserve;
end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
+ } else {
+ struct resource res;
+ int ret;
+
+ /* adjust start_pfn/end_pfn in the dax pmem kmem case */
+ ret = find_next_iomem_res(start_pfn << PAGE_SHIFT,
+ (end_pfn << PAGE_SHIFT) - 1,
+ IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+ IORES_DESC_PERSISTENT_MEMORY,
+ false, &res);
+ if (!ret) {
+ start_pfn = PFN_UP(res.start);
+ end_pfn = PFN_DOWN(res.end + 1);
+ }
}
#endif
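
As an aside, here is a minimal user-space sketch of the clamping the new
branch performs, assuming a 4K page (PAGE_SHIFT of 12). The resource values
are made up, and toy_find_res() merely stands in for find_next_iomem_res();
only the PFN_UP()/PFN_DOWN() arithmetic mirrors the patch:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* same definitions as the kernel's PFN_UP()/PFN_DOWN() */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* toy stand-in for struct resource; 'end' is inclusive, as in the kernel */
struct toy_res {
        unsigned long start, end;
};

/* pretend the lookup found a busy SYSTEM_RAM resource backed by pmem */
static int toy_find_res(struct toy_res *res)
{
        res->start = 0x100000200UL;   /* deliberately not page aligned */
        res->end   = 0x1400001ffUL;
        return 0;                     /* 0 means "found", like the kernel helper */
}

int main(void)
{
        struct toy_res res;

        if (!toy_find_res(&res)) {
                /* round start up and the (inclusive) end down to whole pages */
                unsigned long start_pfn = PFN_UP(res.start);
                unsigned long end_pfn = PFN_DOWN(res.end + 1);

                printf("online pfns [%#lx, %#lx)\n", start_pfn, end_pfn);
        }
        return 0;
}
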
There are 3 cases when onlining pages:
 - normal RAM, which should be aligned with the memory block size
 - persistent memory with ZONE_DEVICE
 - persistent memory used as normal RAM (kmem) with ZONE_NORMAL; this
   patch adjusts start_pfn/end_pfn after finding the corresponding
   resource range

Without this patch, the check in __init_single_page() fails when
onlining such memory, because those pages have not been mapped in the
MMU (i.e. they are not present from the MMU's point of view).

Signed-off-by: Jia He <justin.he@arm.com>
---
 mm/page_alloc.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
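
For context: the kmem case above is typically reached by reconfiguring a
DAX device into system-ram mode from userspace (for example with daxctl's
reconfigure-device command), which binds the device to the dax kmem driver
and hotplugs the persistent memory as ordinary system RAM; onlining that
memory is what exercises this path.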