@@ -40,11 +40,11 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
__GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
- page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
- PAGE_FRAG_CACHE_MAX_ORDER);
+ page = __alloc_pages(gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER,
+ numa_mem_id(), NULL);
#endif
if (unlikely(!page)) {
- page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+ page = __alloc_pages(gfp, 0, numa_mem_id(), NULL);
if (unlikely(!page)) {
nc->encoded_page = 0;
return NULL;
There seems to be an increase of about 24 bytes in the binary size of
__page_frag_cache_refill() after the refactoring, on an arm64 system
with 64K PAGE_SIZE. Disassembling with gdb shows that replacing
alloc_pages_node() with __alloc_pages() shrinks the binary size by more
than 100 bytes, as alloc_pages_node() does some checking for nid being
NUMA_NO_NODE that is unnecessary here: page_frag is part of the mm
system, so it can resolve the node itself and call __alloc_pages()
directly.

CC: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
 mm/page_frag_cache.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
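
For reference, alloc_pages_node() is a thin inline wrapper whose main
job is to translate NUMA_NO_NODE before handing off to __alloc_pages().
A rough sketch, paraphrased from include/linux/gfp.h (the exact debug
checks vary across kernel versions):

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
					    unsigned int order)
{
	/* The check this patch sidesteps: page_frag always passed
	 * NUMA_NO_NODE, so it can resolve the node itself.
	 */
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	/* Debug-only sanity checking of the node id. */
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, nid, NULL);
}

Calling __alloc_pages(gfp, order, numa_mem_id(), NULL) directly thus
performs the same local-node allocation while letting the compiler drop
the NUMA_NO_NODE comparison, which is where the size reduction seen in
the disassembly comes from.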