@@ -511,12 +511,12 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
}
#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
+extern struct page *alloc_pages_current(gfp_t gfp_mask);
static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
- return alloc_pages_current(gfp_mask, order);
+ return alloc_pages_current(gfp_mask | __GFP_ORDER(order));
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
@@ -2108,13 +2108,12 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* %GFP_HIGHMEM highmem allocation,
* %GFP_FS don't call back into a file system.
* %GFP_ATOMIC don't sleep.
- * @order: Power of two of allocation size in pages. 0 is a single page.
*
* Allocate a page from the kernel page pool. When not in
- * interrupt context and apply the current process NUMA policy.
+ * interrupt context, apply the current process NUMA policy.
* Returns NULL when no page can be allocated.
*/
-struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+struct page *alloc_pages_current(gfp_t gfp)
{
struct mempolicy *pol = &default_policy;
struct page *page;
@@ -2127,10 +2126,9 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
* nor system default_policy
*/
if (pol->mode == MPOL_INTERLEAVE)
- page = alloc_page_interleave(gfp | __GFP_ORDER(order),
- interleave_nodes(pol));
+ page = alloc_page_interleave(gfp, interleave_nodes(pol));
else
- page = __alloc_pages_nodemask(gfp | __GFP_ORDER(order),
+ page = __alloc_pages_nodemask(gfp,
policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));