| Message ID | 165730471781.28142.13547044100953437563.stgit@klimt.1015granger.net (mailing list archive) |
|---|---|
| State | Not Applicable |
| Series | Overhaul NFSD filecache |

| Context | Check | Description |
|---|---|---|
| netdev/tree_selection | success | Not a local patch |
On Fri, 2022-07-08 at 14:25 -0400, Chuck Lever wrote:
> There have been reports of problems when running fstests generic/531
> against Linux NFS servers with NFSv4. The NFS server that hosts the
> test's SCRATCH_DEV suffers from CPU soft lock-ups during the test.
> Analysis shows that:
>
> fs/nfsd/filecache.c
>  482         ret = list_lru_walk(&nfsd_file_lru,
>  483                             nfsd_file_lru_cb,
>  484                             &head, LONG_MAX);
>
> causes nfsd_file_gc() to walk the entire length of the filecache LRU
> list every time it is called (which is quite frequently). The walk
> holds a spinlock the entire time that prevents other nfsd threads
> from accessing the filecache.
>
> What's more, for NFSv4 workloads, none of the items that are visited
> during this walk may be evicted, since they are all files that are
> held OPEN by NFS clients.
>
> Address this by ensuring that open files are not kept on the LRU
> list.
>
> Reported-by: Frank van der Linden <fllinden@amazon.com>
> Reported-by: Wang Yugui <wangyugui@e16-tech.com>
> Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=386
> Suggested-by: Trond Myklebust <trond.myklebust@hammerspace.com>
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
>  fs/nfsd/filecache.c | 24 +++++++++++++++++++-----
>  fs/nfsd/trace.h     |  2 ++
>  2 files changed, 21 insertions(+), 5 deletions(-)
>
> diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
> index 37373b012276..6e9e186334ab 100644
> --- a/fs/nfsd/filecache.c
> +++ b/fs/nfsd/filecache.c
> @@ -269,6 +269,7 @@ nfsd_file_flush(struct nfsd_file *nf)
>
>  static void nfsd_file_lru_add(struct nfsd_file *nf)
>  {
> +	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
>  	if (list_lru_add(&nfsd_file_lru, &nf->nf_lru))
>  		trace_nfsd_file_lru_add(nf);
>  }
> @@ -298,7 +299,6 @@ nfsd_file_unhash(struct nfsd_file *nf)
>  {
>  	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
>  		nfsd_file_do_unhash(nf);
> -		nfsd_file_lru_remove(nf);
>  		return true;
>  	}
>  	return false;
> @@ -319,6 +319,7 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
>  	if (refcount_dec_not_one(&nf->nf_ref))
>  		return true;
>
> +	nfsd_file_lru_remove(nf);
>  	list_add(&nf->nf_lru, dispose);
>  	return true;
>  }
> @@ -330,6 +331,7 @@ nfsd_file_put_noref(struct nfsd_file *nf)
>
>  	if (refcount_dec_and_test(&nf->nf_ref)) {
>  		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
> +		nfsd_file_lru_remove(nf);
>  		nfsd_file_free(nf);
>  	}
>  }
> @@ -339,7 +341,7 @@ nfsd_file_put(struct nfsd_file *nf)
>  {
>  	might_sleep();
>
> -	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
> +	nfsd_file_lru_add(nf);

Do you really want to add this on every put? I would have thought you'd
only want to do this on a 2->1 nf_ref transition.

>  	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
>  		nfsd_file_flush(nf);
>  		nfsd_file_put_noref(nf);
> @@ -439,8 +441,18 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
>  	}
>  }
>
> -/*
> +/**
> + * nfsd_file_lru_cb - Examine an entry on the LRU list
> + * @item: LRU entry to examine
> + * @lru: controlling LRU
> + * @lock: LRU list lock (unused)
> + * @arg: dispose list
> + *
>   * Note this can deadlock with nfsd_file_cache_purge.
> + *
> + * Return values:
> + *   %LRU_REMOVED: @item was removed from the LRU
> + *   %LRU_SKIP: @item cannot be evicted
>   */
>  static enum lru_status
>  nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
> @@ -462,8 +474,9 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
>  	 * That order is deliberate to ensure that we can do this locklessly.
>  	 */
>  	if (refcount_read(&nf->nf_ref) > 1) {
> +		list_lru_isolate(lru, &nf->nf_lru);
>  		trace_nfsd_file_gc_in_use(nf);
> -		return LRU_SKIP;
> +		return LRU_REMOVED;

Interesting. So you wait until the LRU scanner runs to remove these
entries? I expected to see you do this in nfsd_file_get, but this does
seem likely to be more efficient.

>  	}
>
>  	/*
> @@ -1020,6 +1033,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>  		goto retry;
>  	}
>
> +	nfsd_file_lru_remove(nf);
>  	this_cpu_inc(nfsd_file_cache_hits);
>
>  	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
> @@ -1055,7 +1069,6 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>  	refcount_inc(&nf->nf_ref);
>  	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
>  	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
> -	nfsd_file_lru_add(nf);
>  	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
>  	++nfsd_file_hashtbl[hashval].nfb_count;
>  	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
> @@ -1080,6 +1093,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>  	 */
>  	if (status != nfs_ok || inode->i_nlink == 0) {
>  		bool do_free;
> +		nfsd_file_lru_remove(nf);
>  		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
>  		do_free = nfsd_file_unhash(nf);
>  		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
> diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
> index 1cc1133371eb..54082b868b72 100644
> --- a/fs/nfsd/trace.h
> +++ b/fs/nfsd/trace.h
> @@ -929,7 +929,9 @@ DEFINE_EVENT(nfsd_file_gc_class, name, \
>  	TP_ARGS(nf))
>
>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
> +DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
> +DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
>
>

--
Jeff Layton <jlayton@redhat.com>
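A note for readers following the LRU_SKIP/LRU_REMOVED exchange above: the
list_lru walk contract works roughly as sketched below. This is an
illustrative example with made-up entry and field names (example_entry,
ex_ref), not the filecache code. The callback runs with the per-node LRU
spinlock held; returning LRU_SKIP leaves an entry in place to be revisited
by every subsequent walk, while list_lru_isolate() plus LRU_REMOVED culls
it for good.

#include <linux/list_lru.h>
#include <linux/refcount.h>

/* Illustrative cached object; only the fields the walk needs. */
#define EXAMPLE_REFERENCED	0	/* "touched since last scan" bit */

struct example_entry {
	struct list_head	ex_lru;
	unsigned long		ex_flags;
	refcount_t		ex_ref;
};

/*
 * Isolate callback in the style this patch adopts: an in-use entry is
 * taken off the list (LRU_REMOVED) instead of skipped (LRU_SKIP), so
 * subsequent walks never revisit it.
 */
static enum lru_status
example_lru_cb(struct list_head *item, struct list_lru_one *lru,
	       spinlock_t *lock, void *arg)
{
	struct example_entry *e = list_entry(item, struct example_entry, ex_lru);
	struct list_head *dispose = arg;

	if (refcount_read(&e->ex_ref) > 1) {
		/* Still in use: cull it rather than skip it. */
		list_lru_isolate(lru, item);
		return LRU_REMOVED;
	}

	/* Reclaimable: move it to the caller's dispose list. */
	list_lru_isolate_move(lru, item, dispose);
	return LRU_REMOVED;
}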
> On Jul 8, 2022, at 3:29 PM, Jeff Layton <jlayton@redhat.com> wrote:
>
> On Fri, 2022-07-08 at 14:25 -0400, Chuck Lever wrote:
>> There have been reports of problems when running fstests generic/531
>> against Linux NFS servers with NFSv4. The NFS server that hosts the
>> test's SCRATCH_DEV suffers from CPU soft lock-ups during the test.
>> Analysis shows that:
>>
>> fs/nfsd/filecache.c
>>  482         ret = list_lru_walk(&nfsd_file_lru,
>>  483                             nfsd_file_lru_cb,
>>  484                             &head, LONG_MAX);
>>
>> causes nfsd_file_gc() to walk the entire length of the filecache LRU
>> list every time it is called (which is quite frequently). The walk
>> holds a spinlock the entire time that prevents other nfsd threads
>> from accessing the filecache.
>>
>> What's more, for NFSv4 workloads, none of the items that are visited
>> during this walk may be evicted, since they are all files that are
>> held OPEN by NFS clients.
>>
>> Address this by ensuring that open files are not kept on the LRU
>> list.
>>
>> Reported-by: Frank van der Linden <fllinden@amazon.com>
>> Reported-by: Wang Yugui <wangyugui@e16-tech.com>
>> Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=386
>> Suggested-by: Trond Myklebust <trond.myklebust@hammerspace.com>
>> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>> ---
>>  fs/nfsd/filecache.c | 24 +++++++++++++++++++-----
>>  fs/nfsd/trace.h     |  2 ++
>>  2 files changed, 21 insertions(+), 5 deletions(-)
>>
>> diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
>> index 37373b012276..6e9e186334ab 100644
>> --- a/fs/nfsd/filecache.c
>> +++ b/fs/nfsd/filecache.c
>> @@ -269,6 +269,7 @@ nfsd_file_flush(struct nfsd_file *nf)
>>
>>  static void nfsd_file_lru_add(struct nfsd_file *nf)
>>  {
>> +	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
>>  	if (list_lru_add(&nfsd_file_lru, &nf->nf_lru))
>>  		trace_nfsd_file_lru_add(nf);
>>  }
>> @@ -298,7 +299,6 @@ nfsd_file_unhash(struct nfsd_file *nf)
>>  {
>>  	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
>>  		nfsd_file_do_unhash(nf);
>> -		nfsd_file_lru_remove(nf);
>>  		return true;
>>  	}
>>  	return false;
>> @@ -319,6 +319,7 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
>>  	if (refcount_dec_not_one(&nf->nf_ref))
>>  		return true;
>>
>> +	nfsd_file_lru_remove(nf);
>>  	list_add(&nf->nf_lru, dispose);
>>  	return true;
>>  }
>> @@ -330,6 +331,7 @@ nfsd_file_put_noref(struct nfsd_file *nf)
>>
>>  	if (refcount_dec_and_test(&nf->nf_ref)) {
>>  		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
>> +		nfsd_file_lru_remove(nf);
>>  		nfsd_file_free(nf);
>>  	}
>>  }
>> @@ -339,7 +341,7 @@ nfsd_file_put(struct nfsd_file *nf)
>>  {
>>  	might_sleep();
>>
>> -	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
>> +	nfsd_file_lru_add(nf);
>
> Do you really want to add this on every put? I would have thought you'd
> only want to do this on a 2->1 nf_ref transition.

My measurements indicate that 2->1 is the common case, so checking
that this is /not/ a 2->1 transition doesn't confer much if any
benefit.

Under load, I don't see any contention on the LRU locks, which is
where I'd expect to see a problem if this design were not efficient.

>>  	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
>>  		nfsd_file_flush(nf);
>>  		nfsd_file_put_noref(nf);
>> @@ -439,8 +441,18 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
>>  	}
>>  }
>>
>> -/*
>> +/**
>> + * nfsd_file_lru_cb - Examine an entry on the LRU list
>> + * @item: LRU entry to examine
>> + * @lru: controlling LRU
>> + * @lock: LRU list lock (unused)
>> + * @arg: dispose list
>> + *
>>   * Note this can deadlock with nfsd_file_cache_purge.
>> + *
>> + * Return values:
>> + *   %LRU_REMOVED: @item was removed from the LRU
>> + *   %LRU_SKIP: @item cannot be evicted
>>   */
>>  static enum lru_status
>>  nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
>> @@ -462,8 +474,9 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
>>  	 * That order is deliberate to ensure that we can do this locklessly.
>>  	 */
>>  	if (refcount_read(&nf->nf_ref) > 1) {
>> +		list_lru_isolate(lru, &nf->nf_lru);
>>  		trace_nfsd_file_gc_in_use(nf);
>> -		return LRU_SKIP;
>> +		return LRU_REMOVED;
>
> Interesting. So you wait until the LRU scanner runs to remove these
> entries? I expected to see you do this in nfsd_file_get, but this does
> seem likely to be more efficient.
>
>>  	}
>>
>>  	/*
>> @@ -1020,6 +1033,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>>  		goto retry;
>>  	}
>>
>> +	nfsd_file_lru_remove(nf);
>>  	this_cpu_inc(nfsd_file_cache_hits);
>>
>>  	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
>> @@ -1055,7 +1069,6 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>>  	refcount_inc(&nf->nf_ref);
>>  	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
>>  	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
>> -	nfsd_file_lru_add(nf);
>>  	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
>>  	++nfsd_file_hashtbl[hashval].nfb_count;
>>  	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
>> @@ -1080,6 +1093,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
>>  	 */
>>  	if (status != nfs_ok || inode->i_nlink == 0) {
>>  		bool do_free;
>> +		nfsd_file_lru_remove(nf);
>>  		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
>>  		do_free = nfsd_file_unhash(nf);
>>  		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
>> diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
>> index 1cc1133371eb..54082b868b72 100644
>> --- a/fs/nfsd/trace.h
>> +++ b/fs/nfsd/trace.h
>> @@ -929,7 +929,9 @@ DEFINE_EVENT(nfsd_file_gc_class, name, \
>>  	TP_ARGS(nf))
>>
>>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
>> +DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
>>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
>> +DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
>>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
>>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
>>  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
>>
>>
>
> --
> Jeff Layton <jlayton@redhat.com>

--
Chuck Lever
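The variant Jeff suggests would look something like this sketch
(hypothetical code reusing the example_entry type from the earlier sketch,
not a proposal from the thread): gate the re-add on the count sitting at 2,
i.e. the hash table's reference plus the one the caller is about to drop.
As Chuck notes, that transition is already the common case, and
list_lru_add() simply returns false after a brief lock acquisition when
the entry is already on the list, so the unconditional call costs little.

#include <linux/slab.h>

static void example_put(struct example_entry *e, struct list_lru *lru)
{
	/*
	 * Racy hint, not a guarantee: the count can change between the
	 * read and the decrement. Part of why the simpler unconditional
	 * re-add in the patch is defensible.
	 */
	if (refcount_read(&e->ex_ref) == 2)
		list_lru_add(lru, &e->ex_lru);

	if (refcount_dec_and_test(&e->ex_ref)) {
		list_lru_del(lru, &e->ex_lru);	/* in case we raced */
		kfree(e);
	}
}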
On Sat, 2022-07-09 at 20:45 +0000, Chuck Lever III wrote:
>
> > On Jul 8, 2022, at 3:29 PM, Jeff Layton <jlayton@redhat.com> wrote:
> >
> > On Fri, 2022-07-08 at 14:25 -0400, Chuck Lever wrote:
> > > There have been reports of problems when running fstests generic/531
> > > against Linux NFS servers with NFSv4. The NFS server that hosts the
> > > test's SCRATCH_DEV suffers from CPU soft lock-ups during the test.
> > > Analysis shows that:
> > >
> > > fs/nfsd/filecache.c
> > >  482         ret = list_lru_walk(&nfsd_file_lru,
> > >  483                             nfsd_file_lru_cb,
> > >  484                             &head, LONG_MAX);
> > >
> > > causes nfsd_file_gc() to walk the entire length of the filecache LRU
> > > list every time it is called (which is quite frequently). The walk
> > > holds a spinlock the entire time that prevents other nfsd threads
> > > from accessing the filecache.
> > >
> > > What's more, for NFSv4 workloads, none of the items that are visited
> > > during this walk may be evicted, since they are all files that are
> > > held OPEN by NFS clients.
> > >
> > > Address this by ensuring that open files are not kept on the LRU
> > > list.
> > >
> > > Reported-by: Frank van der Linden <fllinden@amazon.com>
> > > Reported-by: Wang Yugui <wangyugui@e16-tech.com>
> > > Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=386
> > > Suggested-by: Trond Myklebust <trond.myklebust@hammerspace.com>
> > > Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> > > ---
> > >  fs/nfsd/filecache.c | 24 +++++++++++++++++++-----
> > >  fs/nfsd/trace.h     |  2 ++
> > >  2 files changed, 21 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
> > > index 37373b012276..6e9e186334ab 100644
> > > --- a/fs/nfsd/filecache.c
> > > +++ b/fs/nfsd/filecache.c
> > > @@ -269,6 +269,7 @@ nfsd_file_flush(struct nfsd_file *nf)
> > >
> > >  static void nfsd_file_lru_add(struct nfsd_file *nf)
> > >  {
> > > +	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
> > >  	if (list_lru_add(&nfsd_file_lru, &nf->nf_lru))
> > >  		trace_nfsd_file_lru_add(nf);
> > >  }
> > > @@ -298,7 +299,6 @@ nfsd_file_unhash(struct nfsd_file *nf)
> > >  {
> > >  	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
> > >  		nfsd_file_do_unhash(nf);
> > > -		nfsd_file_lru_remove(nf);
> > >  		return true;
> > >  	}
> > >  	return false;
> > > @@ -319,6 +319,7 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
> > >  	if (refcount_dec_not_one(&nf->nf_ref))
> > >  		return true;
> > >
> > > +	nfsd_file_lru_remove(nf);
> > >  	list_add(&nf->nf_lru, dispose);
> > >  	return true;
> > >  }
> > > @@ -330,6 +331,7 @@ nfsd_file_put_noref(struct nfsd_file *nf)
> > >
> > >  	if (refcount_dec_and_test(&nf->nf_ref)) {
> > >  		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
> > > +		nfsd_file_lru_remove(nf);
> > >  		nfsd_file_free(nf);
> > >  	}
> > >  }
> > > @@ -339,7 +341,7 @@ nfsd_file_put(struct nfsd_file *nf)
> > >  {
> > >  	might_sleep();
> > >
> > > -	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
> > > +	nfsd_file_lru_add(nf);
> >
> > Do you really want to add this on every put? I would have thought you'd
> > only want to do this on a 2->1 nf_ref transition.
>
> My measurements indicate that 2->1 is the common case, so checking
> that this is /not/ a 2->1 transition doesn't confer much if any
> benefit.
>
> Under load, I don't see any contention on the LRU locks, which is
> where I'd expect to see a problem if this design were not efficient.
>

Fair enough. I guess the idea is to throw it onto the LRU and the
scanner will just (eventually) take it off again without reaping it.

You can add my Reviewed-by: to this one as well.

> > >  	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
> > >  		nfsd_file_flush(nf);
> > >  		nfsd_file_put_noref(nf);
> > > @@ -439,8 +441,18 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
> > >  	}
> > >  }
> > >
> > > -/*
> > > +/**
> > > + * nfsd_file_lru_cb - Examine an entry on the LRU list
> > > + * @item: LRU entry to examine
> > > + * @lru: controlling LRU
> > > + * @lock: LRU list lock (unused)
> > > + * @arg: dispose list
> > > + *
> > >   * Note this can deadlock with nfsd_file_cache_purge.
> > > + *
> > > + * Return values:
> > > + *   %LRU_REMOVED: @item was removed from the LRU
> > > + *   %LRU_SKIP: @item cannot be evicted
> > >   */
> > >  static enum lru_status
> > >  nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
> > > @@ -462,8 +474,9 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
> > >  	 * That order is deliberate to ensure that we can do this locklessly.
> > >  	 */
> > >  	if (refcount_read(&nf->nf_ref) > 1) {
> > > +		list_lru_isolate(lru, &nf->nf_lru);
> > >  		trace_nfsd_file_gc_in_use(nf);
> > > -		return LRU_SKIP;
> > > +		return LRU_REMOVED;
> >
> > Interesting. So you wait until the LRU scanner runs to remove these
> > entries? I expected to see you do this in nfsd_file_get, but this does
> > seem likely to be more efficient.
> >
> > >  	}
> > >
> > >  	/*
> > > @@ -1020,6 +1033,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
> > >  		goto retry;
> > >  	}
> > >
> > > +	nfsd_file_lru_remove(nf);
> > >  	this_cpu_inc(nfsd_file_cache_hits);
> > >
> > >  	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
> > > @@ -1055,7 +1069,6 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
> > >  	refcount_inc(&nf->nf_ref);
> > >  	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
> > >  	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
> > > -	nfsd_file_lru_add(nf);
> > >  	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
> > >  	++nfsd_file_hashtbl[hashval].nfb_count;
> > >  	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
> > > @@ -1080,6 +1093,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
> > >  	 */
> > >  	if (status != nfs_ok || inode->i_nlink == 0) {
> > >  		bool do_free;
> > > +		nfsd_file_lru_remove(nf);
> > >  		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
> > >  		do_free = nfsd_file_unhash(nf);
> > >  		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
> > > diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
> > > index 1cc1133371eb..54082b868b72 100644
> > > --- a/fs/nfsd/trace.h
> > > +++ b/fs/nfsd/trace.h
> > > @@ -929,7 +929,9 @@ DEFINE_EVENT(nfsd_file_gc_class, name, \
> > >  	TP_ARGS(nf))
> > >
> > >  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
> > > +DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
> > >  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
> > > +DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
> > >  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
> > >  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
> > >  DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
> > >
> > >
> >
> > --
> > Jeff Layton <jlayton@redhat.com>
>
> --
> Chuck Lever
>
>
>
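Taken together, the design the thread settles on is the standard
isolate-then-dispose shape: the walk runs under the LRU lock and only
moves victims onto a private list, and the work that can sleep (flushing,
releasing) happens after the lock is dropped. A rough sketch, again using
the illustrative types from the earlier example rather than the real
nfsd_file:

static void example_gc(struct list_lru *lru)
{
	LIST_HEAD(dispose);
	struct example_entry *e, *next;

	/*
	 * With in-use entries culled by the callback, this walk is
	 * bounded by the number of reclaimable entries rather than by
	 * the number of files clients hold open.
	 */
	list_lru_walk(lru, example_lru_cb, &dispose, LONG_MAX);

	/* Dispose outside the LRU lock, where sleeping is allowed. */
	list_for_each_entry_safe(e, next, &dispose, ex_lru) {
		list_del_init(&e->ex_lru);
		kfree(e);	/* the real code flushes and releases here */
	}
}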
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 37373b012276..6e9e186334ab 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -269,6 +269,7 @@ nfsd_file_flush(struct nfsd_file *nf)
 
 static void nfsd_file_lru_add(struct nfsd_file *nf)
 {
+	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
 	if (list_lru_add(&nfsd_file_lru, &nf->nf_lru))
 		trace_nfsd_file_lru_add(nf);
 }
@@ -298,7 +299,6 @@ nfsd_file_unhash(struct nfsd_file *nf)
 {
 	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
 		nfsd_file_do_unhash(nf);
-		nfsd_file_lru_remove(nf);
 		return true;
 	}
 	return false;
@@ -319,6 +319,7 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
 	if (refcount_dec_not_one(&nf->nf_ref))
 		return true;
 
+	nfsd_file_lru_remove(nf);
 	list_add(&nf->nf_lru, dispose);
 	return true;
 }
@@ -330,6 +331,7 @@ nfsd_file_put_noref(struct nfsd_file *nf)
 
 	if (refcount_dec_and_test(&nf->nf_ref)) {
 		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
+		nfsd_file_lru_remove(nf);
 		nfsd_file_free(nf);
 	}
 }
@@ -339,7 +341,7 @@ nfsd_file_put(struct nfsd_file *nf)
 {
 	might_sleep();
 
-	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+	nfsd_file_lru_add(nf);
 	if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
 		nfsd_file_flush(nf);
 		nfsd_file_put_noref(nf);
@@ -439,8 +441,18 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose)
 	}
 }
 
-/*
+/**
+ * nfsd_file_lru_cb - Examine an entry on the LRU list
+ * @item: LRU entry to examine
+ * @lru: controlling LRU
+ * @lock: LRU list lock (unused)
+ * @arg: dispose list
+ *
  * Note this can deadlock with nfsd_file_cache_purge.
+ *
+ * Return values:
+ *   %LRU_REMOVED: @item was removed from the LRU
+ *   %LRU_SKIP: @item cannot be evicted
  */
 static enum lru_status
 nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
@@ -462,8 +474,9 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
 	 * That order is deliberate to ensure that we can do this locklessly.
 	 */
 	if (refcount_read(&nf->nf_ref) > 1) {
+		list_lru_isolate(lru, &nf->nf_lru);
 		trace_nfsd_file_gc_in_use(nf);
-		return LRU_SKIP;
+		return LRU_REMOVED;
 	}
 
 	/*
@@ -1020,6 +1033,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 		goto retry;
 	}
 
+	nfsd_file_lru_remove(nf);
 	this_cpu_inc(nfsd_file_cache_hits);
 
 	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
@@ -1055,7 +1069,6 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	refcount_inc(&nf->nf_ref);
 	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
 	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
-	nfsd_file_lru_add(nf);
 	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
 	++nfsd_file_hashtbl[hashval].nfb_count;
 	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
@@ -1080,6 +1093,7 @@ nfsd_do_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	 */
 	if (status != nfs_ok || inode->i_nlink == 0) {
 		bool do_free;
+		nfsd_file_lru_remove(nf);
 		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
 		do_free = nfsd_file_unhash(nf);
 		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 1cc1133371eb..54082b868b72 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -929,7 +929,9 @@ DEFINE_EVENT(nfsd_file_gc_class, name, \
 	TP_ARGS(nf))
 
 DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_add_disposed);
 DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
 DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
 DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
 DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
There have been reports of problems when running fstests generic/531
against Linux NFS servers with NFSv4. The NFS server that hosts the
test's SCRATCH_DEV suffers from CPU soft lock-ups during the test.
Analysis shows that:

fs/nfsd/filecache.c
 482         ret = list_lru_walk(&nfsd_file_lru,
 483                             nfsd_file_lru_cb,
 484                             &head, LONG_MAX);

causes nfsd_file_gc() to walk the entire length of the filecache LRU
list every time it is called (which is quite frequently). The walk
holds a spinlock the entire time that prevents other nfsd threads
from accessing the filecache.

What's more, for NFSv4 workloads, none of the items that are visited
during this walk may be evicted, since they are all files that are
held OPEN by NFS clients.

Address this by ensuring that open files are not kept on the LRU
list.

Reported-by: Frank van der Linden <fllinden@amazon.com>
Reported-by: Wang Yugui <wangyugui@e16-tech.com>
Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=386
Suggested-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 fs/nfsd/filecache.c | 24 +++++++++++++++++++-----
 fs/nfsd/trace.h     |  2 ++
 2 files changed, 21 insertions(+), 5 deletions(-)
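One detail worth noting from the first hunk: moving
set_bit(NFSD_FILE_REFERENCED, ...) into nfsd_file_lru_add() pairs the put
path with the scanner's referenced check (the nfsd_file_gc_referenced
trace event in the trace.h hunk belongs to that check). The usual
second-chance shape, sketched with the illustrative types from the
earlier example; the flag name is made up:

#include <linux/bitops.h>

static enum lru_status
example_lru_cb_second_chance(struct list_head *item,
			     struct list_lru_one *lru,
			     spinlock_t *lock, void *arg)
{
	struct example_entry *e = list_entry(item, struct example_entry, ex_lru);

	/*
	 * An entry touched since the last scan gets one more trip
	 * around the list before becoming a reclaim candidate.
	 */
	if (test_and_clear_bit(EXAMPLE_REFERENCED, &e->ex_flags))
		return LRU_ROTATE;

	list_lru_isolate_move(lru, item, arg);
	return LRU_REMOVED;
}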