@@ -4784,10 +4784,8 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
-	if (likely(page)) {
-		set_page_refcounted(page);
+	if (likely(page))
 		goto out;
-	}
 
 	alloc_gfp = gfp;
 	ac.spread_dirty_pages = false;
@@ -4799,15 +4797,15 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	ac.nodemask = nodemask;
 
 	page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
-	if (page)
-		set_page_refcounted(page);
 
 out:
 	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
 	    unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
-		__free_pages(page, order);
+		free_frozen_pages(page, order);
 		page = NULL;
 	}
 
+	if (page)
+		set_page_refcounted(page);
 	trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
 	kmsan_alloc_page(page, order, alloc_gfp);
Remove some code duplication by calling set_page_refcounted() at the end
of __alloc_pages() instead of after each call that can allocate a page.
That means that we free a frozen page if we've exceeded the allowed
memcg memory.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_alloc.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
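
A side note for review, not part of the patch: the shape of the change is
that both allocation paths now return a frozen page (refcount 0), the
memcg charge-failure path frees the page while it is still frozen, and a
single set_page_refcounted() at the exit makes it live.  Below is a
minimal, self-contained userspace sketch of that shape; the names
(frozen_alloc, charge_to_group, free_frozen) are hypothetical stand-ins,
not the kernel API.

	/*
	 * Userspace analogy of the single-exit refcount pattern.
	 * A "frozen" object has refcount 0 and has never been live,
	 * so the failure path can discard it without dropping a
	 * reference it never took.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct object {
		int refcount;	/* 0 while frozen */
	};

	static struct object *frozen_alloc(void)
	{
		return calloc(1, sizeof(struct object));	/* refcount starts at 0 */
	}

	static void free_frozen(struct object *obj)
	{
		free(obj);	/* nothing to "put": the object was never live */
	}

	static int charge_to_group(struct object *obj)
	{
		(void)obj;
		return -1;	/* pretend the accounting limit was exceeded */
	}

	static struct object *alloc_object(void)
	{
		struct object *obj = frozen_alloc();	/* fast path */

		/* (a slow-path retry would go here, also returning obj frozen) */

		if (obj && charge_to_group(obj) != 0) {
			free_frozen(obj);	/* still frozen: cheap to discard */
			obj = NULL;
		}
		if (obj)
			obj->refcount = 1;	/* the one place the refcount is set */
		return obj;
	}

	int main(void)
	{
		struct object *obj = alloc_object();

		printf("allocation %s\n", obj ? "succeeded" : "failed (charge)");
		free(obj);
		return 0;
	}

The point of setting the refcount at the single exit is that no path ever
has to undo a refcount it has not yet taken, which is exactly why the
charge-failure branch can switch from __free_pages() to
free_frozen_pages().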