@@ -94,7 +94,8 @@ __ref void *alloc_low_pages(unsigned int num)
 		unsigned int order;
 
 		order = get_order((unsigned long)num << PAGE_SHIFT);
-		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
+		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO |
+						__GFP_ORDER(order));
 	}
 
 	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
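Note that __GFP_ORDER() itself is not defined in these hunks. A minimal sketch of the encoding it implies, assuming the order is packed into the high gfp_t bits above __GFP_BITS_SHIFT (the exact bit position, and the gfp_order() extraction helper, are assumptions for illustration, not taken from this patch):

	/*
	 * Sketch only: carry the allocation order in the high gfp_t bits.
	 * Assumes __GFP_BITS_SHIFT marks the first bit unused by real flags.
	 */
	#define __GFP_ORDER(order)	((__force gfp_t)((order) << __GFP_BITS_SHIFT))

	/* Allocator-side extraction (helper name assumed): */
	static inline unsigned int gfp_order(gfp_t gfp)
	{
		return (__force unsigned int)gfp >> __GFP_BITS_SHIFT;
	}

With order 0 encoding as no extra bits set, existing order-0 callers keep working unchanged.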
@@ -401,8 +401,8 @@ static inline pgd_t *_pgd_alloc(void)
 	 * We allocate one page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
-						 PGD_ALLOCATION_ORDER);
+		return (pgd_t *)__get_free_pages(PGALLOC_GFP |
+						 __GFP_ORDER(PGD_ALLOCATION_ORDER));
 
 	/*
 	 * Now PAE kernel is not running as a Xen domain. We can allocate
@@ -422,7 +422,8 @@ static inline void _pgd_free(pgd_t *pgd)
 
 static inline pgd_t *_pgd_alloc(void)
 {
-	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
+	return (pgd_t *)__get_free_pages(PGALLOC_GFP |
+					 __GFP_ORDER(PGD_ALLOCATION_ORDER));
 }
 
 static inline void _pgd_free(pgd_t *pgd)
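Both x86 _pgd_alloc() flavours (PAE and non-PAE) now fold PGD_ALLOCATION_ORDER into the flags. The free side is deliberately untouched: free_pages() carries no gfp mask, so _pgd_free() must still know the order independently. The resulting pairing looks roughly like this (illustrative):

	pgd_t *pgd = (pgd_t *)__get_free_pages(PGALLOC_GFP |
					       __GFP_ORDER(PGD_ALLOCATION_ORDER));
	/* ... */
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);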
@@ -992,7 +992,7 @@ unsigned long devm_get_free_pages(struct device *dev,
 	struct pages_devres *devres;
 	unsigned long addr;
 
-	addr = __get_free_pages(gfp_mask, order);
+	addr = __get_free_pages(gfp_mask | __GFP_ORDER(order));
 
 	if (unlikely(!addr))
 		return 0;
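devm_get_free_pages() keeps the order in its own signature and only changes how it forwards it, so its callers need no conversion; e.g. (illustrative):

	/* Device-managed order-2 (four page) allocation, unchanged: */
	unsigned long buf = devm_get_free_pages(dev, GFP_KERNEL, 2);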
@@ -536,7 +536,7 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, struct vm_area_struct *vma,
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)	\
 	alloc_pages_vma(gfp_mask, vma, addr, node, false)
 
-extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+extern unsigned long __get_free_pages(gfp_t gfp_mask);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
@@ -544,10 +544,10 @@ void free_pages_exact(void *virt, size_t size);
 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 #define __get_free_page(gfp_mask) \
-		__get_free_pages((gfp_mask), 0)
+		__get_free_pages(gfp_mask)
 
 #define __get_dma_pages(gfp_mask, order) \
-		__get_free_pages((gfp_mask) | GFP_DMA, (order))
+		__get_free_pages((gfp_mask) | GFP_DMA | __GFP_ORDER(order))
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
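In the header, __get_free_pages() loses its second parameter, __get_free_page() drops its hard-coded order 0, and __get_dma_pages() ORs its order in. Expanding the DMA helper under the sketched encoding above (illustrative):

	__get_dma_pages(GFP_KERNEL, 2)
	/* ... now expands to: */
	__get_free_pages((GFP_KERNEL) | GFP_DMA | __GFP_ORDER(2))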
@@ -26,7 +26,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
 		return false;
 
-	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 	if (!batch)
 		return false;
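tlb_next_batch() is a plain order-0 caller, so it moves to the __get_free_page() wrapper instead of ORing in __GFP_ORDER(0). Under the sketched encoding the two forms should be equivalent (illustrative):

	/* Equivalent, since __GFP_ORDER(0) == 0: */
	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN | __GFP_ORDER(0));
	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);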
@@ -4681,11 +4681,11 @@ EXPORT_SYMBOL(__alloc_pages_nodemask);
  * address cannot represent highmem pages. Use alloc_pages and then kmap if
  * you need to access high mem.
  */
-unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask)
 {
 	struct page *page;
 
-	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
+	page = __alloc_pages(gfp_mask & ~__GFP_HIGHMEM, numa_mem_id());
 	if (!page)
 		return 0;
 	return (unsigned long) page_address(page);
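__get_free_pages() now hands __alloc_pages() only the order-carrying mask and a node id, so the order must be unpacked further down the allocator. A sketch of the plumbing this implies, using the assumed gfp_order() helper from above and the __alloc_pages_nodemask() entry point visible in the hunk context (not the patch's actual definition):

	static inline struct page *__alloc_pages(gfp_t gfp, int preferred_nid)
	{
		return __alloc_pages_nodemask(gfp, gfp_order(gfp),
					      preferred_nid, NULL);
	}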
@@ -4694,7 +4694,7 @@ EXPORT_SYMBOL(__get_free_pages);
 
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
-	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
+	return __get_free_page(gfp_mask | __GFP_ZERO);
 }
 EXPORT_SYMBOL(get_zeroed_page);
@@ -4869,7 +4869,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
 		gfp_mask &= ~__GFP_COMP;
 
-	addr = __get_free_pages(gfp_mask, order);
+	addr = __get_free_pages(gfp_mask | __GFP_ORDER(order));
 	return make_alloc_exact(addr, order, size);
 }
 EXPORT_SYMBOL(alloc_pages_exact);
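Taken together, converting a remaining caller is mechanical: drop the trailing order argument and OR __GFP_ORDER(order) into the mask. Before and after, for a hypothetical order-3 allocation:

	/* Before: order as a separate argument. */
	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);

	/* After: order carried in the gfp mask. */
	addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_ORDER(3));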