diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -208,14 +208,22 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 }
 
 #ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+extern struct page *__page_cache_alloc_order(gfp_t gfp, unsigned int order);
 #else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline
+struct page *__page_cache_alloc_order(gfp_t gfp, unsigned int order)
 {
-	return alloc_pages(gfp, 0);
+	if (order == 0)
+		return alloc_pages(gfp, 0);
+	return prep_transhuge_page(alloc_pages(gfp | __GFP_COMP, order));
 }
 #endif
 
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+	return __page_cache_alloc_order(gfp, 0);
+}
+
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return __page_cache_alloc(mapping_gfp_mask(x));
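/*
 * Usage sketch (editor's illustration, not part of the patch): with the
 * pagemap.h change above, a caller that wants a PMD-sized page for the
 * page cache could try the new helper first and fall back to a base
 * page.  grab_thp_or_base() is an invented name; mapping_gfp_mask() and
 * HPAGE_PMD_ORDER are existing kernel symbols (the latter under
 * CONFIG_TRANSPARENT_HUGEPAGE).
 */
static struct page *grab_thp_or_base(struct address_space *mapping)
{
	struct page *page;

	/* Try a huge page first; the helper ORs in __GFP_COMP itself. */
	page = __page_cache_alloc_order(mapping_gfp_mask(mapping),
					HPAGE_PMD_ORDER);
	if (!page)
		/* Fall back to a single order-0 page. */
		page = __page_cache_alloc(mapping_gfp_mask(mapping));
	return page;
}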
diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -962,24 +962,31 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 
 #ifdef CONFIG_NUMA
-struct page *__page_cache_alloc(gfp_t gfp)
+struct page *__page_cache_alloc_order(gfp_t gfp, unsigned int order)
 {
 	int n;
 	struct page *page;
 
+	if (order > 0)
+		gfp |= __GFP_COMP;
+
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
 		do {
 			cpuset_mems_cookie = read_mems_allowed_begin();
 			n = cpuset_mem_spread_node();
-			page = __alloc_pages_node(n, gfp, 0);
+			page = __alloc_pages_node(n, gfp, order);
+			if (order > 0)
+				prep_transhuge_page(page);
 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
 		return page;
 	}
-	return alloc_pages(gfp, 0);
+	if (order == 0)
+		return alloc_pages(gfp, 0);
+	return prep_transhuge_page(alloc_pages(gfp, order));
 }
-EXPORT_SYMBOL(__page_cache_alloc);
+EXPORT_SYMBOL(__page_cache_alloc_order);
 #endif
 
 /*
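/*
 * Why the order > 0 path differs (editor's illustration, not part of the
 * patch): a high-order page cache page must be allocated as a compound
 * page, otherwise put_page() on a tail page cannot find the head, and
 * prep_transhuge_page() must run so the THP gets its destructor and its
 * deferred-split list.  Open-coded, the order > 0 case amounts to the
 * hypothetical helper below; alloc_cache_thp() is an invented name.
 */
static struct page *alloc_cache_thp(gfp_t gfp)
{
	struct page *page = alloc_pages(gfp | __GFP_COMP, HPAGE_PMD_ORDER);

	if (page)
		prep_transhuge_page(page);	/* destructor + deferred-split list */
	return page;
}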