__free_pages_core() always resets the page count and clears the
reserved flag of every page it frees; this consumes a lot of time when
there are many pages.

Introduce a MEMINIT_LATE context. If the context is MEMINIT_EARLY, we
don't need to reset the page count or clear the reserved flag.

Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
 include/linux/mmzone.h |  1 +
 mm/internal.h          |  7 ++++---
 mm/kmsan/init.c        |  2 +-
 mm/memblock.c          |  4 ++--
 mm/memory_hotplug.c    |  2 +-
 mm/mm_init.c           | 11 ++++++-----
 mm/page_alloc.c        | 14 ++++++------
 7 files changed, 23 insertions(+), 18 deletions(-)

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1442,6 +1442,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
*/
enum meminit_context {
MEMINIT_EARLY,
+ MEMINIT_LATE,
MEMINIT_HOTPLUG,
};
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -394,9 +394,10 @@ static inline void clear_zone_contiguous(struct zone *zone)
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
-extern void memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order);
-extern void __free_pages_core(struct page *page, unsigned int order);
+extern void memblock_free_pages(unsigned long pfn, unsigned int order,
+ enum meminit_context context);
+extern void __free_pages_core(struct page *page, unsigned int order,
+ enum meminit_context context);
/*
* This will have no effect, other than possibly generating a warning, if the
--- a/mm/kmsan/init.c
+++ b/mm/kmsan/init.c
@@ -172,7 +172,7 @@ static void do_collection(void)
shadow = smallstack_pop(&collect);
origin = smallstack_pop(&collect);
kmsan_setup_meta(page, shadow, origin, collect.order);
- __free_pages_core(page, collect.order);
+ __free_pages_core(page, collect.order, MEMINIT_LATE);
}
}
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1685,7 +1685,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
- memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+ memblock_free_pages(cursor, 0, MEMINIT_LATE);
totalram_pages_inc();
}
}
@@ -2089,7 +2089,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
while (start + (1UL << order) > end)
order--;
- memblock_free_pages(pfn_to_page(start), start, order);
+		memblock_free_pages(start, order, MEMINIT_EARLY);
start += (1UL << order);
}
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -634,7 +634,7 @@ void generic_online_page(struct page *page, unsigned int order)
* case in page freeing fast path.
*/
debug_pagealloc_map_pages(page, 1 << order);
- __free_pages_core(page, order);
+ __free_pages_core(page, order, MEMINIT_HOTPLUG);
totalram_pages_add(1UL << order);
}
EXPORT_SYMBOL_GPL(generic_online_page);
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1976,7 +1976,7 @@ static void __init deferred_free_range(unsigned long pfn,
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
for (i = 0; i < nr_pages; i += pageblock_nr_pages)
set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
- __free_pages_core(page, MAX_ORDER);
+ __free_pages_core(page, MAX_ORDER, MEMINIT_LATE);
return;
}
@@ -1986,7 +1986,7 @@ static void __init deferred_free_range(unsigned long pfn,
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if (pageblock_aligned(pfn))
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- __free_pages_core(page, 0);
+ __free_pages_core(page, 0, MEMINIT_LATE);
}
}
@@ -2568,9 +2568,10 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
dma_reserve = new_dma_reserve;
}
-void __init memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order)
+void __init memblock_free_pages(unsigned long pfn, unsigned int order,
+ enum meminit_context context)
{
+ struct page *page = pfn_to_page(pfn);
if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
int nid = early_pfn_to_nid(pfn);
@@ -2583,7 +2584,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
/* KMSAN will take care of these pages. */
return;
}
- __free_pages_core(page, order);
+ __free_pages_core(page, order, context);
}
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1278,7 +1278,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
__count_vm_events(PGFREE, 1 << order);
}
-void __free_pages_core(struct page *page, unsigned int order)
+void __free_pages_core(struct page *page, unsigned int order, enum meminit_context context)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
@@ -1289,14 +1289,16 @@ void __free_pages_core(struct page *page, unsigned int order)
* of all pages to 1 ("allocated"/"not free"). We have to set the
* refcount of all involved pages to 0.
*/
- prefetchw(p);
- for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
- prefetchw(p + 1);
+ if (context != MEMINIT_EARLY) {
+ prefetchw(p);
+ for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+ prefetchw(p + 1);
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+ }
__ClearPageReserved(p);
set_page_count(p, 0);
}
- __ClearPageReserved(p);
- set_page_count(p, 0);
atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
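
For reference, the net result of the mm/page_alloc.c change is sketched
below, together with the context each updated caller passes. This is an
illustrative sketch, not part of the patch itself; the tail of the
function (handing the block to the buddy allocator) is elided and
unchanged.

/*
 * Sketch only: __free_pages_core() as it looks with this patch applied.
 *
 * Contexts passed by the callers touched here:
 *   __free_pages_memory()  (memblock, early boot)   -> MEMINIT_EARLY
 *   memblock_free_late(), deferred_free_range(),
 *   KMSAN's do_collection()                         -> MEMINIT_LATE
 *   generic_online_page()  (memory hotplug)         -> MEMINIT_HOTPLUG
 */
void __free_pages_core(struct page *page, unsigned int order,
		       enum meminit_context context)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * Outside of MEMINIT_EARLY, the struct pages were initialized with
	 * a refcount of 1 ("allocated") and possibly PG_reserved set, so
	 * both must be reset before the buddy allocator may see the pages.
	 * In MEMINIT_EARLY this per-page work is skipped entirely, which
	 * is where the time saving comes from.
	 */
	if (context != MEMINIT_EARLY) {
		prefetchw(p);
		for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
			prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	/* ... free the block to the buddy allocator (unchanged) ... */
}

Note that skipping the loop is only valid if MEMINIT_EARLY pages already
start out with a zero page count and without PG_reserved, which is the
assumption the patch description relies on; every caller that cannot
guarantee this keeps the old reset behaviour by passing MEMINIT_LATE or
MEMINIT_HOTPLUG.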