@@ -219,6 +219,18 @@ struct vm_area_struct;
/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+#define __GFP_ORDER(order) ((__force gfp_t)((order) << __GFP_BITS_SHIFT))
+#define __GFP_PMD __GFP_ORDER(PMD_SHIFT - PAGE_SHIFT)
+#define __GFP_PUD __GFP_ORDER(PUD_SHIFT - PAGE_SHIFT)
+
+/*
+ * Extract the order from a GFP bitmask.
+ * Must be the top bits to avoid an AND operation. Don't let
+ * __GFP_BITS_SHIFT get over 27, or we won't be able to encode orders
+ * above 15 (some architectures allow configuring MAX_ORDER up to 64,
+ * but I doubt orders larger than 31 are ever used).
+ */
+#define gfp_order(gfp) (((__force unsigned int)(gfp)) >> __GFP_BITS_SHIFT)
/**
* DOC: Useful GFP flag combinations
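
Because the order occupies the bits above __GFP_BITS_SHIFT, encoding it is a single OR and recovering it is a single shift. A minimal userspace model of that round-trip, assuming __GFP_BITS_SHIFT of 23 (no lockdep); the names shadow the kernel macros but drop the __force sparse annotations, so this is a sketch, not kernel code:

	#include <assert.h>
	#include <stdio.h>

	#define GFP_BITS_SHIFT		23
	#define GFP_ORDER(order)	((unsigned int)((order) << GFP_BITS_SHIFT))
	#define gfp_order(gfp)		(((unsigned int)(gfp)) >> GFP_BITS_SHIFT)

	int main(void)
	{
		unsigned int gfp = 0xc0;	/* arbitrary flag bits below the shift */
		unsigned int order;

		/* OR the order into the top bits, then shift it back out. */
		for (order = 0; order < 16; order++)
			assert(gfp_order(gfp | GFP_ORDER(order)) == order);

		printf("order 9 encodes as %#x\n", GFP_ORDER(9));
		return 0;
	}
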
@@ -464,13 +476,13 @@ static inline void arch_alloc_page(struct page *page, int order) { }
#endif
struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
- nodemask_t *nodemask);
+__alloc_pages_nodemask(gfp_t gfp_mask, int preferred_nid, nodemask_t *nodemask);
static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
{
- return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
+ return __alloc_pages_nodemask(gfp_mask | __GFP_ORDER(order),
+ preferred_nid, NULL);
}
/*
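
The __alloc_pages() wrapper keeps its order parameter and folds it into the mask, so existing callers compile unchanged, while new callers can pre-encode the order themselves. A hedged sketch of such a caller, assuming kernel context and the definitions above (alloc_pmd_sized is a hypothetical helper, not from this patch):

	/* Allocate a PMD-sized compound page on the given node. */
	static struct page *alloc_pmd_sized(int nid)
	{
		/* Equivalent to __alloc_pages(GFP_KERNEL | __GFP_COMP,
		 * HPAGE_PMD_ORDER, nid): the order rides in the mask. */
		return __alloc_pages_nodemask(GFP_KERNEL | __GFP_COMP | __GFP_PMD,
					      nid, NULL);
	}
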
@@ -50,7 +50,7 @@ static inline struct page *new_page_nodemask(struct page *page,
if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
gfp_mask |= __GFP_HIGHMEM;
- new_page = __alloc_pages_nodemask(gfp_mask, order,
+ new_page = __alloc_pages_nodemask(gfp_mask | __GFP_ORDER(order),
preferred_nid, nodemask);
if (new_page && PageTransHuge(new_page))
@@ -1409,10 +1409,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
int order = huge_page_order(h);
struct page *page;
- gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+ gfp_mask |= __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
+ __GFP_ORDER(order);
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
+ page = __alloc_pages_nodemask(gfp_mask, nid, nmask);
if (page)
__count_vm_event(HTLB_BUDDY_PGALLOC);
else
@@ -2093,7 +2093,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
- page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
+ page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order), preferred_nid,
+ nmask);
mpol_cond_put(pol);
out:
return page;
@@ -2129,7 +2130,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
if (pol->mode == MPOL_INTERLEAVE)
page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
else
- page = __alloc_pages_nodemask(gfp, order,
+ page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order),
policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
@@ -4622,11 +4622,11 @@ static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
* This is the 'heart' of the zoned buddy allocator.
*/
struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
- nodemask_t *nodemask)
+__alloc_pages_nodemask(gfp_t gfp_mask, int preferred_nid, nodemask_t *nodemask)
{
struct page *page;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
+ unsigned int order = gfp_order(gfp_mask);
gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
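
Inside the allocator the order is now whatever the top bits of gfp_mask say, so a caller that encodes a bogus order is only caught if something validates the decoded value. A hedged sketch of a sanity check that could sit next to the gfp_order() call above; it is not part of this patch, though VM_WARN_ON_ONCE and MAX_ORDER are the stock kernel facilities:

	unsigned int order = gfp_order(gfp_mask);

	/* Valid orders are 0 .. MAX_ORDER-1; anything else means the
	 * caller put garbage into the top bits of gfp_mask. */
	VM_WARN_ON_ONCE(order >= MAX_ORDER);
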