@@ -3081,7 +3081,7 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
}
/* Must be called after current_gfp_context() which can change gfp_mask */
-static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
+static inline unsigned int gfp_to_alloc_flags_fast(gfp_t gfp_mask,
unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
@@ -3784,7 +3784,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
} else if (unlikely(rt_task(current)) && in_task())
alloc_flags |= ALLOC_MIN_RESERVE;
- alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
+ alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, alloc_flags);
return alloc_flags;
}
@@ -4074,7 +4074,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
if (reserve_flags)
- alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
+ alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, reserve_flags) |
(alloc_flags & ALLOC_KSWAPD);
/*
@@ -4250,7 +4250,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
if (should_fail_alloc_page(gfp_mask, order))
return false;
- *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
+ *alloc_flags = gfp_to_alloc_flags_fast(gfp_mask, *alloc_flags);
/* Dirty zone balancing only done in the fast path */
ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
gfp_to_alloc_flags_cma() is called on the fast path of the page allocator and
all it does is set the ALLOC_CMA flag if all the conditions are met for the
allocation to be satisfied from the MIGRATE_CMA list. Rename it to be more
generic, as it will soon have to handle another flag.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 mm/page_alloc.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
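
For reviewers skimming the rename: below is a minimal sketch of the helper
being renamed, reconstructed from the CONFIG_CMA context visible in the first
hunk above; the exact body in mm/page_alloc.c may differ slightly.

	/* Must be called after current_gfp_context() which can change gfp_mask */
	static inline unsigned int gfp_to_alloc_flags_fast(gfp_t gfp_mask,
							   unsigned int alloc_flags)
	{
	#ifdef CONFIG_CMA
		/* Movable allocations may also be satisfied from MIGRATE_CMA pageblocks. */
		if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
			alloc_flags |= ALLOC_CMA;
	#endif
		return alloc_flags;
	}

The rename keeps the CMA handling unchanged; only the name becomes generic
enough to cover an additional flag added by a later patch in the series.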