Message ID | 20210312154331.32229-6-mgorman@techsingularity.net (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Series | Introduce a bulk order-0 page allocator with two in-tree users |
Context | Check | Description |
---|---|---|
netdev/tree_selection | success | Guessing tree name failed - patch did not apply |
On Fri, Mar 12, 2021 at 7:43 AM Mel Gorman <mgorman@techsingularity.net> wrote:
>
> From: Chuck Lever <chuck.lever@oracle.com>
>
> Reduce the rate at which nfsd threads hammer on the page allocator.
> This improves throughput scalability by enabling the threads to run
> more independently of each other.
>
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
> ---
>  net/sunrpc/svc_xprt.c | 43 +++++++++++++++++++++++++++++++------------
>  1 file changed, 31 insertions(+), 12 deletions(-)
>
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index cfa7e4776d0e..38a8d6283801 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -642,11 +642,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
>  static int svc_alloc_arg(struct svc_rqst *rqstp)
>  {
>         struct svc_serv *serv = rqstp->rq_server;
> +       unsigned long needed;
>         struct xdr_buf *arg;
> +       struct page *page;
>         int pages;
>         int i;
>
> -       /* now allocate needed pages. If we get a failure, sleep briefly */
>         pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
>         if (pages > RPCSVC_MAXPAGES) {
>                 pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
> @@ -654,19 +655,28 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
>                 /* use as many pages as possible */
>                 pages = RPCSVC_MAXPAGES;
>         }
> -       for (i = 0; i < pages ; i++)
> -               while (rqstp->rq_pages[i] == NULL) {
> -                       struct page *p = alloc_page(GFP_KERNEL);
> -                       if (!p) {
> -                               set_current_state(TASK_INTERRUPTIBLE);
> -                               if (signalled() || kthread_should_stop()) {
> -                                       set_current_state(TASK_RUNNING);
> -                                       return -EINTR;
> -                               }
> -                               schedule_timeout(msecs_to_jiffies(500));
> +
> +       for (needed = 0, i = 0; i < pages ; i++)
> +               if (!rqstp->rq_pages[i])
> +                       needed++;

I would use an opening and closing braces for the for loop since
technically the if is a multiline statement. It will make this more
readable.

> +       if (needed) {
> +               LIST_HEAD(list);
> +
> +retry:

Rather than kind of open code a while loop why not just make this
"while (needed)"? Then all you have to do is break out of the for loop
and you will automatically return here instead of having to jump to
two different labels.

> +               alloc_pages_bulk(GFP_KERNEL, needed, &list);

Rather than not using the return value would it make sense here to
perhaps subtract it from needed? Then you would know if any of the
allocation requests weren't fulfilled.

> +               for (i = 0; i < pages; i++) {

It is probably optimizing for the exception case, but I don't think
you want the "i = 0" here. If you are having to stop because the list
is empty it probably makes sense to resume where you left off. So you
should probably be initializing i to 0 before we check for needed.

> +                       if (!rqstp->rq_pages[i]) {

It might be cleaner here to just do a "continue" if rq_pages[i] is
populated.

> +                               page = list_first_entry_or_null(&list,
> +                                                               struct page,
> +                                                               lru);
> +                               if (unlikely(!page))
> +                                       goto empty_list;

I think I preferred the original code that wasn't jumping away from
the loop here. With the change I suggested above that would switch the
if(needed) to while(needed) you could have it just break out of the
for loop to place itself back in the while loop.

> +                               list_del(&page->lru);
> +                               rqstp->rq_pages[i] = page;
> +                               needed--;
>                         }
> -                       rqstp->rq_pages[i] = p;
>                 }
> +       }
>         rqstp->rq_page_end = &rqstp->rq_pages[pages];
>         rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
>
> @@ -681,6 +691,15 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
>         arg->len = (pages-1)*PAGE_SIZE;
>         arg->tail[0].iov_len = 0;
>         return 0;
> +
> +empty_list:
> +       set_current_state(TASK_INTERRUPTIBLE);
> +       if (signalled() || kthread_should_stop()) {
> +               set_current_state(TASK_RUNNING);
> +               return -EINTR;
> +       }
> +       schedule_timeout(msecs_to_jiffies(500));
> +       goto retry;
>  }
>
>  static bool
> --
> 2.26.2
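Taken together, the comments above suggest replacing the retry label with a
while (needed) loop, consuming the return value of alloc_pages_bulk(),
resuming the fill loop where it left off, and using continue/break in place
of the nested if and the empty_list label. A rough, untested sketch of that
shape follows; it is an editorial illustration only, not code from the
thread, and it assumes the alloc_pages_bulk(gfp, nr_pages, list) call used
in this version of the series returns the number of pages it added to the
list:

        /*
         * Sketch only: svc_alloc_arg() allocation path restructured along
         * the lines suggested in the review above. Assumes
         * alloc_pages_bulk() returns the number of pages placed on @list.
         */
        for (needed = 0, i = 0; i < pages; i++) {
                if (!rqstp->rq_pages[i])
                        needed++;
        }
        if (needed) {
                LIST_HEAD(list);

                i = 0;
                while (needed) {
                        /* Track the shortfall via the return value */
                        needed -= alloc_pages_bulk(GFP_KERNEL, needed, &list);

                        /* Resume filling where the previous pass stopped */
                        for (; i < pages; i++) {
                                if (rqstp->rq_pages[i])
                                        continue;
                                page = list_first_entry_or_null(&list,
                                                                struct page,
                                                                lru);
                                if (!page)
                                        break;  /* list ran dry */
                                list_del(&page->lru);
                                rqstp->rq_pages[i] = page;
                        }
                        if (!needed)
                                break;

                        /* Partial allocation: sleep briefly, then retry */
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (signalled() || kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                return -EINTR;
                        }
                        schedule_timeout(msecs_to_jiffies(500));
                }
        }

The patch that was eventually merged may well differ; the sketch is only
meant to make the suggested while (needed) control flow concrete.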
Mel, I can send you a tidied and tested update to this patch, or you
can drop the two NFSD patches and I can submit them via the NFSD tree
when alloc_pages_bulk() is merged.

> On Mar 12, 2021, at 1:44 PM, Alexander Duyck <alexander.duyck@gmail.com> wrote:
>
> [ ... ]

--
Chuck Lever
On Fri, Mar 12, 2021 at 07:22:43PM +0000, Chuck Lever III wrote:
> Mel, I can send you a tidied and tested update to this patch,
> or you can drop the two NFSD patches and I can submit them via
> the NFSD tree when alloc_pages_bulk() is merged.
>

Send me a tidied version anyway. I'm happy enough to include them in
the series even if it ultimately gets merged via the NFSD tree. It'll
need to be kept as a separate pull request to avoid delaying unrelated
NFSD patches until Andrew merges the mm parts.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cfa7e4776d0e..38a8d6283801 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -642,11 +642,12 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
         struct svc_serv *serv = rqstp->rq_server;
+        unsigned long needed;
         struct xdr_buf *arg;
+        struct page *page;
         int pages;
         int i;
 
-        /* now allocate needed pages. If we get a failure, sleep briefly */
         pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
         if (pages > RPCSVC_MAXPAGES) {
                 pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
@@ -654,19 +655,28 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
                 /* use as many pages as possible */
                 pages = RPCSVC_MAXPAGES;
         }
-        for (i = 0; i < pages ; i++)
-                while (rqstp->rq_pages[i] == NULL) {
-                        struct page *p = alloc_page(GFP_KERNEL);
-                        if (!p) {
-                                set_current_state(TASK_INTERRUPTIBLE);
-                                if (signalled() || kthread_should_stop()) {
-                                        set_current_state(TASK_RUNNING);
-                                        return -EINTR;
-                                }
-                                schedule_timeout(msecs_to_jiffies(500));
+
+        for (needed = 0, i = 0; i < pages ; i++)
+                if (!rqstp->rq_pages[i])
+                        needed++;
+        if (needed) {
+                LIST_HEAD(list);
+
+retry:
+                alloc_pages_bulk(GFP_KERNEL, needed, &list);
+                for (i = 0; i < pages; i++) {
+                        if (!rqstp->rq_pages[i]) {
+                                page = list_first_entry_or_null(&list,
+                                                                struct page,
+                                                                lru);
+                                if (unlikely(!page))
+                                        goto empty_list;
+                                list_del(&page->lru);
+                                rqstp->rq_pages[i] = page;
+                                needed--;
                         }
-                        rqstp->rq_pages[i] = p;
                 }
+        }
         rqstp->rq_page_end = &rqstp->rq_pages[pages];
         rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
 
@@ -681,6 +691,15 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
         arg->len = (pages-1)*PAGE_SIZE;
         arg->tail[0].iov_len = 0;
         return 0;
+
+empty_list:
+        set_current_state(TASK_INTERRUPTIBLE);
+        if (signalled() || kthread_should_stop()) {
+                set_current_state(TASK_RUNNING);
+                return -EINTR;
+        }
+        schedule_timeout(msecs_to_jiffies(500));
+        goto retry;
 }
 
 static bool