Message ID | 20210311114935.11379-4-mgorman@techsingularity.net (mailing list archive)
---|---
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | Introduce a bulk order-0 page allocator with two in-tree users
Context | Check | Description
---|---|---
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | success | Link |
netdev/tree_selection | success | Guessed tree name to be net-next |
netdev/subject_prefix | success | Link |
netdev/cc_maintainers | warning | 5 maintainers not CCed: anna.schumaker@netapp.com trond.myklebust@hammerspace.com bfields@fieldses.org davem@davemloft.net kuba@kernel.org |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | success | Errors and warnings before: 0 this patch: 0 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 67 lines checked |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 0 this patch: 0 |
netdev/header_inline | success | Link |
```
> On Mar 11, 2021, at 6:49 AM, Mel Gorman <mgorman@techsingularity.net> wrote:
>
> From: Chuck Lever <chuck.lever@oracle.com>
>
> Reduce the rate at which nfsd threads hammer on the page allocator.
> This improve throughput scalability by enabling the threads to run
> more independently of each other.

Mel, if you should repost this series:

^improve^improves

> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
> ---
>  net/sunrpc/svc_xprt.c | 43 +++++++++++++++++++++++++++++++------------
>  1 file changed, 31 insertions(+), 12 deletions(-)
>
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index cfa7e4776d0e..38a8d6283801 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -642,11 +642,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
>  static int svc_alloc_arg(struct svc_rqst *rqstp)
>  {
>  	struct svc_serv *serv = rqstp->rq_server;
> +	unsigned long needed;
>  	struct xdr_buf *arg;
> +	struct page *page;
>  	int pages;
>  	int i;
>
> -	/* now allocate needed pages. If we get a failure, sleep briefly */
>  	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
>  	if (pages > RPCSVC_MAXPAGES) {
>  		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
> @@ -654,19 +655,28 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
>  		/* use as many pages as possible */
>  		pages = RPCSVC_MAXPAGES;
>  	}
> -	for (i = 0; i < pages ; i++)
> -		while (rqstp->rq_pages[i] == NULL) {
> -			struct page *p = alloc_page(GFP_KERNEL);
> -			if (!p) {
> -				set_current_state(TASK_INTERRUPTIBLE);
> -				if (signalled() || kthread_should_stop()) {
> -					set_current_state(TASK_RUNNING);
> -					return -EINTR;
> -				}
> -				schedule_timeout(msecs_to_jiffies(500));
> +
> +	for (needed = 0, i = 0; i < pages ; i++)
> +		if (!rqstp->rq_pages[i])
> +			needed++;
> +	if (needed) {
> +		LIST_HEAD(list);
> +
> +retry:
> +		alloc_pages_bulk(GFP_KERNEL, needed, &list);
> +		for (i = 0; i < pages; i++) {
> +			if (!rqstp->rq_pages[i]) {
> +				page = list_first_entry_or_null(&list,
> +								struct page,
> +								lru);
> +				if (unlikely(!page))
> +					goto empty_list;
> +				list_del(&page->lru);
> +				rqstp->rq_pages[i] = page;
> +				needed--;
>  			}
> -			rqstp->rq_pages[i] = p;
>  		}
> +	}
>  	rqstp->rq_page_end = &rqstp->rq_pages[pages];
>  	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
>
> @@ -681,6 +691,15 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
>  	arg->len = (pages-1)*PAGE_SIZE;
>  	arg->tail[0].iov_len = 0;
>  	return 0;
> +
> +empty_list:
> +	set_current_state(TASK_INTERRUPTIBLE);
> +	if (signalled() || kthread_should_stop()) {
> +		set_current_state(TASK_RUNNING);
> +		return -EINTR;
> +	}
> +	schedule_timeout(msecs_to_jiffies(500));
> +	goto retry;
>  }
>
>  static bool
> --
> 2.26.2

--
Chuck Lever
```
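For readers landing on this patch without the rest of the series: `alloc_pages_bulk()` here is the list-based bulk allocator introduced earlier in the series. It links up to the requested number of order-0 pages onto the caller-supplied list via `page->lru`, and it may deliver fewer pages than asked for (or none at all under memory pressure), so callers must handle a partial fill. Below is a minimal sketch of that consumer pattern, assuming the series' list-based API; `fill_page_array()` is a hypothetical helper for illustration, not part of the patch:

```c
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: populate the NULL slots of @array with
 * freshly allocated pages. Returns true if every slot is populated,
 * false on a partial fill. Already-populated slots are preserved and
 * skipped, so the caller may sleep briefly and simply call again.
 */
static bool fill_page_array(struct page **array, unsigned long count)
{
	unsigned long needed = 0, i;
	LIST_HEAD(list);	/* allocator links pages here via page->lru */

	/* Only request what is actually missing. */
	for (i = 0; i < count; i++)
		if (!array[i])
			needed++;
	if (!needed)
		return true;

	/* May link anywhere from 0 to @needed pages onto @list. */
	alloc_pages_bulk(GFP_KERNEL, needed, &list);

	for (i = 0; i < count; i++) {
		struct page *page;

		if (array[i])
			continue;
		page = list_first_entry_or_null(&list, struct page, lru);
		if (!page)
			return false;	/* list exhausted: partial fill */
		list_del(&page->lru);
		array[i] = page;
	}
	return true;
}
```

Because populated slots survive a partial fill, retrying costs only the missing pages; this is exactly the shape of the retry/empty_list logic in the patch shown below.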
```diff
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cfa7e4776d0e..38a8d6283801 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -642,11 +642,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
+	unsigned long needed;
 	struct xdr_buf *arg;
+	struct page *page;
 	int pages;
 	int i;

-	/* now allocate needed pages. If we get a failure, sleep briefly */
 	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
 	if (pages > RPCSVC_MAXPAGES) {
 		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
@@ -654,19 +655,28 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 		/* use as many pages as possible */
 		pages = RPCSVC_MAXPAGES;
 	}
-	for (i = 0; i < pages ; i++)
-		while (rqstp->rq_pages[i] == NULL) {
-			struct page *p = alloc_page(GFP_KERNEL);
-			if (!p) {
-				set_current_state(TASK_INTERRUPTIBLE);
-				if (signalled() || kthread_should_stop()) {
-					set_current_state(TASK_RUNNING);
-					return -EINTR;
-				}
-				schedule_timeout(msecs_to_jiffies(500));
+
+	for (needed = 0, i = 0; i < pages ; i++)
+		if (!rqstp->rq_pages[i])
+			needed++;
+	if (needed) {
+		LIST_HEAD(list);
+
+retry:
+		alloc_pages_bulk(GFP_KERNEL, needed, &list);
+		for (i = 0; i < pages; i++) {
+			if (!rqstp->rq_pages[i]) {
+				page = list_first_entry_or_null(&list,
+								struct page,
+								lru);
+				if (unlikely(!page))
+					goto empty_list;
+				list_del(&page->lru);
+				rqstp->rq_pages[i] = page;
+				needed--;
 			}
-			rqstp->rq_pages[i] = p;
 		}
+	}
 	rqstp->rq_page_end = &rqstp->rq_pages[pages];
 	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */

@@ -681,6 +691,15 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 	arg->len = (pages-1)*PAGE_SIZE;
 	arg->tail[0].iov_len = 0;
 	return 0;
+
+empty_list:
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signalled() || kthread_should_stop()) {
+		set_current_state(TASK_RUNNING);
+		return -EINTR;
+	}
+	schedule_timeout(msecs_to_jiffies(500));
+	goto retry;
 }

 static bool
```
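To make the retry semantics concrete, here is an invented walk-through (numbers chosen purely for illustration): suppose `pages` is 4 and all four `rq_pages` slots start out NULL, so `needed` is 4. If the first `alloc_pages_bulk()` call delivers only two pages, slots 0 and 1 are populated and `needed` drops to 2; the scan then finds the list empty at slot 2 and jumps to `empty_list`, where the thread sleeps for up to 500 ms (or returns -EINTR if it was signalled or asked to stop). On the jump back to `retry`, only the two missing pages are requested, and the already-populated slots are skipped because the loop tests `rqstp->rq_pages[i]` before consuming from the list.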