@@ -98,9 +98,9 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
/* attempt to allocate a granule's worth of cached memory pages */
- page = __alloc_pages_node(nid,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ page = __alloc_pages_node(GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_ORDER(IA64_GRANULE_SHIFT-PAGE_SHIFT),
+ nid);
if (!page) {
mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
@@ -92,8 +92,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
*/
node = pcibus_to_node(pdev->bus);
if (likely(node >=0)) {
- struct page *p = __alloc_pages_node(node,
- flags, get_order(size));
+ struct page *p = __alloc_pages_node(flags |
+ __GFP_ORDER(get_order(size)), node);
if (likely(p))
cpuaddr = page_address(p);
@@ -123,9 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
area->nid = nid;
area->order = order;
- area->pages = __alloc_pages_node(area->nid,
- GFP_KERNEL|__GFP_THISNODE,
- area->order);
+ area->pages = __alloc_pages_node(GFP_KERNEL | __GFP_THISNODE |
+ __GFP_ORDER(area->order), nid);
if (!area->pages) {
printk(KERN_WARNING "%s: no page on node %d\n",
@@ -315,13 +315,13 @@ static void ds_clear_cea(void *cea, size_t size)
preempt_enable();
}
-static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
+static void *dsalloc_pages(size_t size, gfp_t gfp, int cpu)
{
unsigned int order = get_order(size);
int node = cpu_to_node(cpu);
struct page *page;
- page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
+ page = __alloc_pages_node(gfp | __GFP_ZERO | __GFP_ORDER(order), node);
return page ? page_address(page) : NULL;
}
@@ -2379,13 +2379,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return 0;
}
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t gfp)
{
int node = cpu_to_node(cpu);
struct page *pages;
struct vmcs *vmcs;
- pages = __alloc_pages_node(node, flags, vmcs_config.order);
+ pages = __alloc_pages_node(gfp | __GFP_ORDER(vmcs_config.order), node);
if (!pages)
return NULL;
vmcs = page_address(pages);
@@ -240,9 +240,8 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
- page = __alloc_pages_node(nid,
- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
- pg_order);
+ page = __alloc_pages_node(GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_ORDER(pg_order), nid);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -487,13 +487,12 @@ static inline struct page *__alloc_pages(gfp_t gfp, int preferred_nid)
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For more general interface, see alloc_pages_node().
*/
-static inline struct page *
-__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
+static inline struct page *__alloc_pages_node(gfp_t gfp, int nid)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
- return __alloc_pages(gfp_mask | __GFP_ORDER(order), nid);
+ VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+ return __alloc_pages(gfp, nid);
}
/*
@@ -507,7 +506,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- return __alloc_pages_node(nid, gfp_mask, order);
+ return __alloc_pages_node(gfp_mask | __GFP_ORDER(order), nid);
}
#ifdef CONFIG_NUMA
@@ -359,7 +359,7 @@ static int profile_prepare_cpu(unsigned int cpu)
if (per_cpu(cpu_profile_hits, cpu)[i])
continue;
- page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ page = __alloc_pages_node(GFP_KERNEL | __GFP_ZERO, node);
if (!page) {
profile_dead_cpu(cpu);
return -ENOMEM;
@@ -945,7 +945,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
do {
cpuset_mems_cookie = read_mems_allowed_begin();
n = cpuset_mem_spread_node();
- page = __alloc_pages_node(n, gfp, 0);
+ page = __alloc_pages_node(gfp, n);
} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
return page;
@@ -1316,14 +1316,14 @@ static struct page *new_non_cma_page(struct page *page, unsigned long private)
* CMA area again.
*/
thp_gfpmask &= ~__GFP_MOVABLE;
- thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
+ thp = __alloc_pages_node(thp_gfpmask | __GFP_PMD, nid);
if (!thp)
return NULL;
prep_transhuge_page(thp);
return thp;
}
- return __alloc_pages_node(nid, gfp_mask, 0);
+ return __alloc_pages_node(gfp_mask, nid);
}
static long check_and_migrate_cma_pages(struct task_struct *tsk,
@@ -770,7 +770,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
VM_BUG_ON_PAGE(*hpage, *hpage);
- *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
+ *hpage = __alloc_pages_node(gfp | __GFP_PMD, node);
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
@@ -974,8 +974,8 @@ struct page *alloc_new_node_page(struct page *page, unsigned long node)
prep_transhuge_page(thp);
return thp;
} else
- return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
- __GFP_THISNODE, 0);
+ return __alloc_pages_node(GFP_HIGHUSER_MOVABLE |
+ __GFP_THISNODE, node);
}
/*
@@ -2084,8 +2084,8 @@ alloc_pages_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr,
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, 0);
+ page = __alloc_pages_node(gfp | __GFP_THISNODE,
+ hpage_node);
goto out;
}
}
@@ -1880,11 +1880,10 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
int nid = (int) data;
struct page *newpage;
- newpage = __alloc_pages_node(nid,
- (GFP_HIGHUSER_MOVABLE |
- __GFP_THISNODE | __GFP_NOMEMALLOC |
- __GFP_NORETRY | __GFP_NOWARN) &
- ~__GFP_RECLAIM, 0);
+ newpage = __alloc_pages_node((GFP_HIGHUSER_MOVABLE | __GFP_THISNODE |
+ __GFP_NOMEMALLOC | __GFP_NORETRY |
+ __GFP_NOWARN) & ~__GFP_RECLAIM,
+ nid);
return newpage;
}
@@ -1393,7 +1393,8 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
flags |= cachep->allocflags;
- page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
+ page = __alloc_pages_node(flags | __GFP_ORDER(cachep->gfporder),
+ nodeid);
if (!page) {
slab_out_of_memory(cachep, flags, nodeid);
return NULL;
@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
#ifdef CONFIG_NUMA
if (node != NUMA_NO_NODE)
- page = __alloc_pages_node(node, gfp, order);
+ page = __alloc_pages_node(gfp | __GFP_ORDER(order), node);
else
#endif
page = alloc_pages(gfp, order);
@@ -1488,7 +1488,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
if (node == NUMA_NO_NODE)
page = alloc_pages(flags, order);
else
- page = __alloc_pages_node(node, flags, order);
+ page = __alloc_pages_node(flags | __GFP_ORDER(order), node);
if (page && memcg_charge_slab(page, flags, order, s)) {
__free_pages(page, order);