From patchwork Fri Mar 12 15:43:25 2021
X-Patchwork-Id: 12135057
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 1/7] mm/page_alloc: Move gfp_allowed_mask enforcement to
 prepare_alloc_pages
Date: Fri, 12 Mar 2021 15:43:25 +0000
Message-Id: <20210312154331.32229-2-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

__alloc_pages updates GFP flags to enforce which flags are allowed
during a global context such as booting or suspend. This patch moves
the enforcement from __alloc_pages to prepare_alloc_pages so the code
can be shared between the single page allocator and a new bulk page
allocator.

While moving the code it became obvious that __alloc_pages() and
prepare_alloc_pages() use different names for the same variable. This
is an unnecessary complication, so rename gfp_mask to gfp in
prepare_alloc_pages() so the name is consistent. No functional change.
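For background: gfp_allowed_mask narrows the set of usable GFP flags
while reclaim and I/O are unavailable (early boot, suspend/resume). The
fragment below is an editor's sketch, not part of the patch, and
sketch_prepare() is an invented name; it only illustrates why masking
inside the shared preparation helper covers every entry point that
calls it.

	/*
	 * Illustrative sketch only: narrowing the flags once in the shared
	 * preparation step means both the single-page and bulk entry points
	 * see the effective mask.
	 */
	static bool sketch_prepare(gfp_t gfp, gfp_t *alloc_gfp)
	{
		gfp &= gfp_allowed_mask;  /* e.g. __GFP_FS/__GFP_IO dropped during suspend */
		*alloc_gfp = gfp;         /* callers continue with the narrowed flags */
		return true;
	}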
Signed-off-by: Mel Gorman
---
 mm/page_alloc.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 00b67c47ad87..f0c1d74ead6f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4914,15 +4914,18 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	return page;
 }
 
-static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
+static inline bool prepare_alloc_pages(gfp_t gfp, unsigned int order,
 		int preferred_nid, nodemask_t *nodemask,
 		struct alloc_context *ac, gfp_t *alloc_gfp,
 		unsigned int *alloc_flags)
 {
-	ac->highest_zoneidx = gfp_zone(gfp_mask);
-	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
+	gfp &= gfp_allowed_mask;
+	*alloc_gfp = gfp;
+
+	ac->highest_zoneidx = gfp_zone(gfp);
+	ac->zonelist = node_zonelist(preferred_nid, gfp);
 	ac->nodemask = nodemask;
-	ac->migratetype = gfp_migratetype(gfp_mask);
+	ac->migratetype = gfp_migratetype(gfp);
 
 	if (cpusets_enabled()) {
 		*alloc_gfp |= __GFP_HARDWALL;
@@ -4936,18 +4939,18 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 		*alloc_flags |= ALLOC_CPUSET;
 	}
 
-	fs_reclaim_acquire(gfp_mask);
-	fs_reclaim_release(gfp_mask);
+	fs_reclaim_acquire(gfp);
+	fs_reclaim_release(gfp);
 
-	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
+	might_sleep_if(gfp & __GFP_DIRECT_RECLAIM);
 
-	if (should_fail_alloc_page(gfp_mask, order))
+	if (should_fail_alloc_page(gfp, order))
 		return false;
 
-	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
+	*alloc_flags = current_alloc_flags(gfp, *alloc_flags);
 
 	/* Dirty zone balancing only done in the fast path */
-	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
+	ac->spread_dirty_pages = (gfp & __GFP_WRITE);
 
 	/*
 	 * The preferred zone is used for statistics but crucially it is
@@ -4980,8 +4983,6 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 		return NULL;
 	}
 
-	gfp &= gfp_allowed_mask;
-	alloc_gfp = gfp;
 	if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
 			&alloc_gfp, &alloc_flags))
 		return NULL;
From patchwork Fri Mar 12 15:43:26 2021
X-Patchwork-Id: 12135065
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 2/7] mm/page_alloc: Rename alloced to allocated
Date: Fri, 12 Mar 2021 15:43:26 +0000
Message-Id: <20210312154331.32229-3-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

Review feedback on the bulk allocator twice flagged "alloced" as a
confusing name for a counter of allocated pages. The name was derived
from the API name "alloc", on the basis that spoken discussion of
malloc tends to use the made-up word "malloced" rather than
"mallocated". For consistency, this preparation patch renames alloced
to allocated in rmqueue_bulk so the per-cpu allocator and the upcoming
bulk allocator use the same name.

Signed-off-by: Mel Gorman
Acked-by: Vlastimil Babka
---
 mm/page_alloc.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f0c1d74ead6f..880b1d6368bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2904,7 +2904,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, unsigned int alloc_flags)
 {
-	int i, alloced = 0;
+	int i, allocated = 0;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
@@ -2927,7 +2927,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * pages are ordered properly.
 		 */
 		list_add_tail(&page->lru, list);
-		alloced++;
+		allocated++;
 		if (is_migrate_cma(get_pcppage_migratetype(page)))
 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 					      -(1 << order));
@@ -2936,12 +2936,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	/*
 	 * i pages were removed from the buddy list even if some leak due
 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
-	 * on i. Do not confuse with 'alloced' which is the number of
+	 * on i. Do not confuse with 'allocated' which is the number of
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
-	return alloced;
+	return allocated;
 }
 
 #ifdef CONFIG_NUMA

From patchwork Fri Mar 12 15:43:27 2021
X-Patchwork-Id: 12135061
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 3/7] mm/page_alloc: Add a bulk page allocator
Date: Fri, 12 Mar 2021 15:43:27 +0000
Message-Id: <20210312154331.32229-4-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

This patch adds a new page allocator interface via alloc_pages_bulk
and __alloc_pages_bulk. A caller requests a number of pages to be
allocated and added to a list. The pages can be freed in bulk using
free_pages_bulk().

The API is not guaranteed to return the requested number of pages and
may fail if the preferred allocation zone has limited free memory, if
the cpuset changes during the allocation, or if page debugging decides
to fail an allocation. It's up to the caller to request more pages in
batch if necessary.

Note that this implementation is not very efficient and could be
improved, but that would require refactoring. The intent is to make it
available early to determine what semantics are required by different
callers. Once the full semantics are nailed down, it can be refactored.
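Because the call can return fewer pages than requested, callers are
expected to top up in a loop. The sketch below is an editor's
illustration of that contract, not code from the series; fill_batch()
is an invented name and the unbounded retry is a simplification a real
caller would want to cap.

	/* Hypothetical caller: keep asking until 'want' pages are on the list.
	 * A real caller would bound its retries or sleep between attempts.
	 */
	static int fill_batch(unsigned long want, struct list_head *list)
	{
		unsigned long have = 0;

		while (have < want) {
			unsigned long got;

			/* May return anything from 0 to (want - have) pages */
			got = alloc_pages_bulk(GFP_KERNEL, want - have, list);
			if (!got)
				return -ENOMEM;	/* even the single-page fallback failed */
			have += got;
		}
		return 0;
	}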
Signed-off-by: Mel Gorman
Acked-by: Vlastimil Babka
---
 include/linux/gfp.h |  12 +++++
 mm/page_alloc.c     | 116 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 128 insertions(+)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0a88f84b08f4..e2cd98dba72e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -518,6 +518,17 @@ static inline int arch_make_page_accessible(struct page *page)
 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
 
+int __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+				nodemask_t *nodemask, int nr_pages,
+				struct list_head *list);
+
+/* Bulk allocate order-0 pages */
+static inline unsigned long
+alloc_pages_bulk(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
+{
+	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list);
+}
+
 /*
  * Allocate pages, preferring the node given as nid. The node must be valid and
  * online. For more general interface, see alloc_pages_node().
@@ -581,6 +592,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 extern void __free_pages(struct page *page, unsigned int order);
 extern void free_pages(unsigned long addr, unsigned int order);
+extern void free_pages_bulk(struct list_head *list);
 
 struct page_frag_cache;
 extern void __page_frag_cache_drain(struct page *page, unsigned int count);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 880b1d6368bd..f48f94375b66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4436,6 +4436,21 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
 	}
 }
 
+/* Drop reference counts and free order-0 pages from a list. */
+void free_pages_bulk(struct list_head *list)
+{
+	struct page *page, *next;
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		trace_mm_page_free_batched(page);
+		if (put_page_testzero(page)) {
+			list_del(&page->lru);
+			__free_pages_ok(page, 0, FPI_NONE);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(free_pages_bulk);
+
 static inline unsigned int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
@@ -4963,6 +4978,107 @@ static inline bool prepare_alloc_pages(gfp_t gfp, unsigned int order,
 	return true;
 }
 
+/*
+ * This is a batched version of the page allocator that attempts to
+ * allocate nr_pages quickly from the preferred zone and add them to list.
+ *
+ * Returns the number of pages allocated.
+ */
+int __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+			nodemask_t *nodemask, int nr_pages,
+			struct list_head *alloc_list)
+{
+	struct page *page;
+	unsigned long flags;
+	struct zone *zone;
+	struct zoneref *z;
+	struct per_cpu_pages *pcp;
+	struct list_head *pcp_list;
+	struct alloc_context ac;
+	gfp_t alloc_gfp;
+	unsigned int alloc_flags;
+	int allocated = 0;
+
+	if (WARN_ON_ONCE(nr_pages <= 0))
+		return 0;
+
+	if (nr_pages == 1)
+		goto failed;
+
+	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
+	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac,
+				 &alloc_gfp, &alloc_flags))
+		return 0;
+	gfp = alloc_gfp;
+
+	/* Find an allowed local zone that meets the high watermark. */
+	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
+		unsigned long mark;
+
+		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
+		    !__cpuset_zone_allowed(zone, gfp)) {
+			continue;
+		}
+
+		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
+		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+			goto failed;
+		}
+
+		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
+		if (zone_watermark_fast(zone, 0, mark,
+				zonelist_zone_idx(ac.preferred_zoneref),
+				alloc_flags, gfp)) {
+			break;
+		}
+	}
+	if (!zone)
+		return 0;
+
+	/* Attempt the batch allocation */
+	local_irq_save(flags);
+	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+	pcp_list = &pcp->lists[ac.migratetype];
+
+	while (allocated < nr_pages) {
+		page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
+								pcp, pcp_list);
+		if (!page) {
+			/* Try and get at least one page */
+			if (!allocated)
+				goto failed_irq;
+			break;
+		}
+
+		list_add(&page->lru, alloc_list);
+		allocated++;
+	}
+
+	__count_zid_vm_events(PGALLOC, zone_idx(zone), allocated);
+	zone_statistics(zone, zone);
+
+	local_irq_restore(flags);
+
+	/* Prep page with IRQs enabled to reduce disabled times */
+	list_for_each_entry(page, alloc_list, lru)
+		prep_new_page(page, 0, gfp, 0);
+
+	return allocated;
+
+failed_irq:
+	local_irq_restore(flags);
+
+failed:
+	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
+	if (page) {
+		list_add(&page->lru, alloc_list);
+		allocated = 1;
+	}
+
+	return allocated;
+}
+EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
+
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
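A short, hypothetical round trip of the two new calls, for readers
skimming the series (batch_selftest() is an invented name, not from the
patch). The key property it exercises: free_pages_bulk() drops one
reference per page and only frees pages whose refcount reaches zero, so
a partially filled or even empty list is safe to pass.

	static void batch_selftest(void)
	{
		LIST_HEAD(list);
		unsigned long got;

		/* May put anywhere from 0 to 16 order-0 pages on the list */
		got = alloc_pages_bulk(GFP_KERNEL, 16, &list);
		pr_debug("bulk allocator returned %lu of 16 pages\n", got);

		/* Pages still referenced elsewhere would stay on the list */
		free_pages_bulk(&list);
	}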
From patchwork Fri Mar 12 15:43:28 2021
X-Patchwork-Id: 12135067
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 4/7] SUNRPC: Set rq_page_end differently
Date: Fri, 12 Mar 2021 15:43:28 +0000
Message-Id: <20210312154331.32229-5-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

From: Chuck Lever

Refactor: I'm about to use the loop variable @i for something else.

As far as the "i++" is concerned, that is a post-increment. The value
of @i is not used subsequently, so the increment operator is
unnecessary and can be removed. Also note that nfsd_read_actor() was
renamed nfsd_splice_actor() by commit cf8208d0eabd ("sendfile: convert
nfsd to splice_direct_to_actor()").

Signed-off-by: Chuck Lever
Signed-off-by: Mel Gorman
---
 net/sunrpc/svc_xprt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index dcc50ae54550..cfa7e4776d0e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -667,8 +667,8 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 		}
 		rqstp->rq_pages[i] = p;
 	}
-	rqstp->rq_page_end = &rqstp->rq_pages[i];
-	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
+	rqstp->rq_page_end = &rqstp->rq_pages[pages];
+	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
 
 	/* Make arg->head point to first page and arg->pages point to rest */
 	arg = &rqstp->rq_arg;
From patchwork Fri Mar 12 15:43:29 2021
X-Patchwork-Id: 12135069
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 5/7] SUNRPC: Refresh rq_pages using a bulk page allocator
Date: Fri, 12 Mar 2021 15:43:29 +0000
Message-Id: <20210312154331.32229-6-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

From: Chuck Lever

Reduce the rate at which nfsd threads hammer on the page allocator.
This improves throughput scalability by enabling the threads to run
more independently of each other.

Signed-off-by: Chuck Lever
Signed-off-by: Mel Gorman
---
 net/sunrpc/svc_xprt.c | 43 +++++++++++++++++++++++++++++++------------
 1 file changed, 31 insertions(+), 12 deletions(-)

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cfa7e4776d0e..38a8d6283801 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -642,11 +642,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
+	unsigned long needed;
 	struct xdr_buf *arg;
+	struct page *page;
 	int pages;
 	int i;
 
-	/* now allocate needed pages.  If we get a failure, sleep briefly */
 	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
 	if (pages > RPCSVC_MAXPAGES) {
 		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
@@ -654,19 +655,28 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 		/* use as many pages as possible */
 		pages = RPCSVC_MAXPAGES;
 	}
-	for (i = 0; i < pages ; i++)
-		while (rqstp->rq_pages[i] == NULL) {
-			struct page *p = alloc_page(GFP_KERNEL);
-			if (!p) {
-				set_current_state(TASK_INTERRUPTIBLE);
-				if (signalled() || kthread_should_stop()) {
-					set_current_state(TASK_RUNNING);
-					return -EINTR;
-				}
-				schedule_timeout(msecs_to_jiffies(500));
+
+	for (needed = 0, i = 0; i < pages ; i++)
+		if (!rqstp->rq_pages[i])
+			needed++;
+	if (needed) {
+		LIST_HEAD(list);
+
+retry:
+		alloc_pages_bulk(GFP_KERNEL, needed, &list);
+		for (i = 0; i < pages; i++) {
+			if (!rqstp->rq_pages[i]) {
+				page = list_first_entry_or_null(&list,
+								struct page,
+								lru);
+				if (unlikely(!page))
+					goto empty_list;
+				list_del(&page->lru);
+				rqstp->rq_pages[i] = page;
+				needed--;
 			}
-			rqstp->rq_pages[i] = p;
 		}
+	}
 	rqstp->rq_page_end = &rqstp->rq_pages[pages];
 	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
@@ -681,6 +691,15 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 	arg->len = (pages-1)*PAGE_SIZE;
 	arg->tail[0].iov_len = 0;
 	return 0;
+
+empty_list:
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signalled() || kthread_should_stop()) {
+		set_current_state(TASK_RUNNING);
+		return -EINTR;
+	}
+	schedule_timeout(msecs_to_jiffies(500));
+	goto retry;
 }
 
 static bool
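The pattern here, counting the empty slots in a sparse array,
bulk-allocating that many pages, then walking the array filling holes,
generalizes beyond SUNRPC. A condensed, hypothetical illustration
follows (refill_array() is an invented name; unlike svc_alloc_arg() it
returns the shortfall instead of sleeping, leaving retry policy to the
caller).

	/* Hypothetical helper distilled from the svc_alloc_arg() pattern:
	 * fill NULL slots in pages[0..n-1] from one bulk request. Returns
	 * the number of slots still empty.
	 */
	static unsigned long refill_array(struct page **pages, unsigned long n)
	{
		unsigned long i, needed = 0;
		struct page *page;
		LIST_HEAD(list);

		for (i = 0; i < n; i++)
			if (!pages[i])
				needed++;
		if (!needed)
			return 0;

		alloc_pages_bulk(GFP_KERNEL, needed, &list);
		for (i = 0; i < n && needed; i++) {
			if (pages[i])
				continue;
			page = list_first_entry_or_null(&list, struct page, lru);
			if (!page)
				break;	/* bulk request came up short */
			list_del(&page->lru);
			pages[i] = page;
			needed--;
		}
		return needed;
	}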
From patchwork Fri Mar 12 15:43:30 2021
X-Patchwork-Id: 12135063
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 6/7] net: page_pool: refactor dma_map into own function
 page_pool_dma_map
Date: Fri, 12 Mar 2021 15:43:30 +0000
Message-Id: <20210312154331.32229-7-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

From: Jesper Dangaard Brouer

In preparation for the next patch, move the DMA mapping into its own
function, as this will make the changes easier to follow.

V2: make page_pool_dma_map return a boolean (Ilias)

Signed-off-by: Jesper Dangaard Brouer
Signed-off-by: Mel Gorman
Reviewed-by: Ilias Apalodimas
---
 net/core/page_pool.c | 45 +++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 19 deletions(-)

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ad8b0707af04..40e1b2beaa6c 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -180,14 +180,37 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  pool->p.dma_dir);
 }
 
+static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
+{
+	dma_addr_t dma;
+
+	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
+	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
+	 * This mapping is kept for lifetime of page, until leaving pool.
+	 */
+	dma = dma_map_page_attrs(pool->p.dev, page, 0,
+				 (PAGE_SIZE << pool->p.order),
+				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(pool->p.dev, dma))
+		return false;
+
+	page->dma_addr = dma;
+
+	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+
+	return true;
+}
+
 /* slow path */
 noinline
 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 						 gfp_t _gfp)
 {
+	unsigned int pp_flags = pool->p.flags;
 	struct page *page;
 	gfp_t gfp = _gfp;
-	dma_addr_t dma;
 
 	/* We could always set __GFP_COMP, and avoid this branch, as
 	 * prep_new_page() can handle order-0 with __GFP_COMP.
@@ -211,30 +234,14 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	if (!page)
 		return NULL;
 
-	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
-		goto skip_dma_map;
-
-	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
-	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
-	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
-	 * This mapping is kept for lifetime of page, until leaving pool.
-	 */
-	dma = dma_map_page_attrs(pool->p.dev, page, 0,
-				 (PAGE_SIZE << pool->p.order),
-				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (dma_mapping_error(pool->p.dev, dma)) {
+	if ((pp_flags & PP_FLAG_DMA_MAP) &&
+	    unlikely(!page_pool_dma_map(pool, page))) {
 		put_page(page);
 		return NULL;
 	}
 
-	page->dma_addr = dma;
-	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
-		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
-
-skip_dma_map:
 	/* Track how many pages are held 'in-flight' */
 	pool->pages_state_hold_cnt++;
-
 	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
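A boolean-returning map helper reduces the caller's error handling to a
single predicate and a single put_page(). The caller-side shape this
refactor enables is sketched below; it is an editor's illustration, not
code from the patch, and map_or_release() is an invented name (it would
have to live in page_pool.c, since page_pool_dma_map() is static there).

	/* Hypothetical caller shape: one predicate, one failure path.
	 * 'pool' and 'page' are assumed set up as in
	 * __page_pool_alloc_pages_slow().
	 */
	static struct page *map_or_release(struct page_pool *pool,
					   struct page *page)
	{
		if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);	/* mapping failed; give the page back */
			return NULL;
		}
		return page;
	}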
From patchwork Fri Mar 12 15:43:31 2021
X-Patchwork-Id: 12135059
From: Mel Gorman
To: Andrew Morton
Cc: Chuck Lever, Jesper Dangaard Brouer, Christoph Hellwig,
    Alexander Duyck, Matthew Wilcox, LKML, Linux-Net, Linux-MM,
    Linux-NFS, Mel Gorman
Subject: [PATCH 7/7] net: page_pool: use alloc_pages_bulk in refill code path
Date: Fri, 12 Mar 2021 15:43:31 +0000
Message-Id: <20210312154331.32229-8-mgorman@techsingularity.net>
In-Reply-To: <20210312154331.32229-1-mgorman@techsingularity.net>
References: <20210312154331.32229-1-mgorman@techsingularity.net>

From: Jesper Dangaard Brouer

There are cases where the page_pool needs to refill with pages from
the page allocator. Some workloads cause the page_pool to release
pages instead of recycling them. For these workloads it can improve
performance to bulk-allocate pages from the page allocator to refill
the alloc cache.

One example is an XDP-redirect workload with a 100G mlx5 driver (which
uses page_pool) redirecting xdp_frame packets into a veth; the veth
does XDP_PASS to create an SKB from the xdp_frame, so the page cannot
be returned to the page_pool. In this case, we saw[1] an improvement
of 18.8% from using the alloc_pages_bulk API (3,677,958 pps ->
4,368,926 pps).

[1] https://github.com/xdp-project/xdp-project/blob/master/areas/mem/page_pool06_alloc_pages_bulk.org

Signed-off-by: Jesper Dangaard Brouer
Signed-off-by: Mel Gorman
Reviewed-by: Ilias Apalodimas
---
 net/core/page_pool.c | 62 ++++++++++++++++++++++++++++----------------
 1 file changed, 39 insertions(+), 23 deletions(-)

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 40e1b2beaa6c..a5889f1b86aa 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -208,44 +208,60 @@ noinline
 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 						 gfp_t _gfp)
 {
+	const int bulk = PP_ALLOC_CACHE_REFILL;
+	struct page *page, *next, *first_page;
 	unsigned int pp_flags = pool->p.flags;
-	struct page *page;
+	unsigned int pp_order = pool->p.order;
+	int pp_nid = pool->p.nid;
+	LIST_HEAD(page_list);
 	gfp_t gfp = _gfp;
 
-	/* We could always set __GFP_COMP, and avoid this branch, as
-	 * prep_new_page() can handle order-0 with __GFP_COMP.
-	 */
-	if (pool->p.order)
+	/* Don't support bulk alloc for high-order pages */
+	if (unlikely(pp_order)) {
 		gfp |= __GFP_COMP;
+		first_page = alloc_pages_node(pp_nid, gfp, pp_order);
+		if (unlikely(!first_page))
+			return NULL;
+		goto out;
+	}
 
-	/* FUTURE development:
-	 *
-	 * Current slow-path essentially falls back to single page
-	 * allocations, which doesn't improve performance.  This code
-	 * need bulk allocation support from the page allocator code.
-	 */
-
-	/* Cache was empty, do real allocation */
-#ifdef CONFIG_NUMA
-	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
-#else
-	page = alloc_pages(gfp, pool->p.order);
-#endif
-	if (!page)
+	if (unlikely(!__alloc_pages_bulk(gfp, pp_nid, NULL, bulk, &page_list)))
 		return NULL;
 
+	/* First page is extracted and returned to caller */
+	first_page = list_first_entry(&page_list, struct page, lru);
+	list_del(&first_page->lru);
+
+	/* Remaining pages store in alloc.cache */
+	list_for_each_entry_safe(page, next, &page_list, lru) {
+		list_del(&page->lru);
+		if ((pp_flags & PP_FLAG_DMA_MAP) &&
+		    unlikely(!page_pool_dma_map(pool, page))) {
+			put_page(page);
+			continue;
+		}
+		if (likely(pool->alloc.count < PP_ALLOC_CACHE_SIZE)) {
+			pool->alloc.cache[pool->alloc.count++] = page;
+			pool->pages_state_hold_cnt++;
+			trace_page_pool_state_hold(pool, page,
+						   pool->pages_state_hold_cnt);
+		} else {
+			put_page(page);
+		}
+	}
+out:
 	if ((pp_flags & PP_FLAG_DMA_MAP) &&
-	    unlikely(!page_pool_dma_map(pool, page))) {
-		put_page(page);
+	    unlikely(!page_pool_dma_map(pool, first_page))) {
+		put_page(first_page);
 		return NULL;
 	}
 
 	/* Track how many pages are held 'in-flight' */
 	pool->pages_state_hold_cnt++;
-	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
+	trace_page_pool_state_hold(pool, first_page, pool->pages_state_hold_cnt);
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
-	return page;
+	return first_page;
 }
 
 /* For using page_pool replace: alloc_pages() API calls, but provide