@@ -1561,7 +1561,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 							unsigned int alloc_flags)
 {
 	post_alloc_hook(page, order, gfp_flags);
-	set_page_refcounted(page);
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
@@ -3508,6 +3507,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
 			prep_new_page(page, order, gfp_mask, alloc_flags);
+			set_page_refcounted(page);
 
 			/*
 			 * If this is a high-order atomic allocation then check
@@ -3732,8 +3732,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	count_vm_event(COMPACTSTALL);
 
 	/* Prep a captured page if available */
-	if (page)
+	if (page) {
 		prep_new_page(page, order, gfp_mask, alloc_flags);
+		set_page_refcounted(page);
+	}
 
 	/* Try get a page from the freelist if available */
 	if (!page)
@@ -4712,6 +4714,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 		nr_account++;
 
 		prep_new_page(page, 0, gfp, 0);
+		set_page_refcounted(page);
 		if (page_list)
 			list_add(&page->lru, page_list);
 		else
@@ -6534,6 +6537,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
 		check_new_pages(head, order);
 		prep_new_page(head, order, gfp_mask, 0);
+		set_page_refcounted(head);
 	} else {
 		ret = -EINVAL;
 		WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n",
 			   start, end, outer_start, outer_end);
In preparation for allocating frozen pages, stop initialising the page
refcount in prep_new_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_alloc.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
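
As a note for reviewers, here is a minimal, self-contained userspace
sketch of the calling convention this patch moves to: prep leaves the
refcount alone, and each caller that hands out a normal (non-frozen)
page sets it afterwards. The struct page, prep_new_page() and
set_page_refcounted() below are simplified stand-ins invented purely
for illustration, not the kernel implementations:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's struct page. */
struct page {
	int refcount;
	unsigned int order;
};

/* Stand-in for prep_new_page(): initialises the page but deliberately
 * leaves the refcount at zero, i.e. "frozen". */
static void prep_new_page(struct page *page, unsigned int order)
{
	page->order = order;
}

/* Stand-in for set_page_refcounted(): the caller makes the page
 * visible with an initial reference. */
static void set_page_refcounted(struct page *page)
{
	page->refcount = 1;
}

int main(void)
{
	struct page *page = calloc(1, sizeof(*page));

	if (!page)
		return 1;

	prep_new_page(page, 0);
	set_page_refcounted(page);	/* now the caller's job */
	printf("order=%u refcount=%d\n", page->order, page->refcount);
	free(page);
	return 0;
}

With the call split out like this, a caller that wants a frozen page
(refcount zero) can simply skip the set_page_refcounted() step instead
of allocating a refcounted page and freezing it afterwards, which is
presumably what the frozen-page preparation mentioned in the commit
message is building towards.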