@@ -543,13 +543,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
-{
-	return alloc_pages_current(gfp_mask, order);
-}
+struct page *alloc_pages(gfp_t gfp, unsigned int order);
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			int node, bool hugepage);
@@ -2245,7 +2245,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 EXPORT_SYMBOL(alloc_pages_vma);
 
 /**
- * alloc_pages_current - Allocate pages.
+ * alloc_pages - Allocate pages.
  *
  * @gfp:
  *	%GFP_USER   user allocation,
@@ -2259,7 +2259,7 @@ EXPORT_SYMBOL(alloc_pages_vma);
  * interrupt context and apply the current process NUMA policy.
  * Returns NULL when no page can be allocated.
  */
-struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+struct page *alloc_pages(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = &default_policy;
 	struct page *page;
@@ -2280,7 +2280,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 
 	return page;
 }
-EXPORT_SYMBOL(alloc_pages_current);
+EXPORT_SYMBOL(alloc_pages);
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
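
For context, a minimal sketch of how a caller would use the renamed interface; the demo_buffer() helper below is hypothetical and only for illustration, while alloc_pages(), GFP_KERNEL, page_address() and __free_pages() are the existing kernel APIs touched on here. Callers need no change: the removed static inline wrapper already exposed the same alloc_pages(gfp, order) signature, and the patch simply gives the out-of-line NUMA implementation that name directly.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical example caller, not part of the patch. */
static void *demo_buffer(void)
{
	/* Allocate 2^2 = 4 contiguous pages; on CONFIG_NUMA kernels this now
	 * lands in the renamed alloc_pages() (formerly alloc_pages_current())
	 * and applies the current task's NUMA policy. */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;	/* alloc_pages() returns NULL on failure */

	/* Caller eventually releases the pages with __free_pages(page, 2). */
	return page_address(page);
}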