@@ -2535,7 +2535,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
set_page_pfmemalloc(page);
else
clear_page_pfmemalloc(page);
- set_page_refcounted(page);
}
/*
@@ -4281,6 +4280,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
gfp_mask, alloc_flags, ac->migratetype);
if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
+ set_page_refcounted(page);
/*
* If this is a high-order atomic allocation then check
@@ -4504,8 +4504,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
count_vm_event(COMPACTSTALL);
/* Prep a captured page if available */
- if (page)
+ if (page) {
prep_new_page(page, order, gfp_mask, alloc_flags);
+ set_page_refcounted(page);
+ }
/* Try get a page from the freelist if available */
if (!page)
@@ -5440,6 +5442,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nr_account++;
prep_new_page(page, 0, gfp, 0);
+ set_page_refcounted(page);
if (page_list)
list_add(&page->lru, page_list);
else
In preparation for allocating frozen pages, stop initialising the page
refcount in prep_new_page().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_alloc.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)