Message ID: 166696846122.106044.14636201700548704756.stgit@klimt.1015granger.net (mailing list archive)
State:      New, archived
Series:     A course adjustment, for sure...
On Fri, 2022-10-28 at 10:47 -0400, Chuck Lever wrote:
> Remove the call to find_file_locked() in insert_nfs4_file(). Tracing
> shows that over 99% of these calls return NULL. Thus it is not worth
> the expense of the extra bucket list traversal. insert_file() already
> deals correctly with the case where the item is already in the hash
> bucket.
> 
> Since nfsd4_file_hash_insert() is now just a wrapper around
> insert_file(), move the meat of insert_file() into
> nfsd4_file_hash_insert() and get rid of it.
> 
> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> Reviewed-by: NeilBrown <neilb@suse.de>
> ---
>  fs/nfsd/nfs4state.c |   64 ++++++++++++++++++++++-----------------------------
>  1 file changed, 28 insertions(+), 36 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index 504455cb80e9..b1988a46fb9b 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -4683,24 +4683,42 @@ find_file_locked(const struct svc_fh *fh, unsigned int hashval)
>  	return NULL;
>  }
>  
> -static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
> -				     unsigned int hashval)
> +static struct nfs4_file * find_file(struct svc_fh *fh)
>  {
>  	struct nfs4_file *fp;
> +	unsigned int hashval = file_hashval(fh);
> +
> +	rcu_read_lock();
> +	fp = find_file_locked(fh, hashval);
> +	rcu_read_unlock();
> +	return fp;
> +}
> +
> +/*
> + * On hash insertion, identify entries with the same inode but
> + * distinct filehandles. They will all be in the same hash bucket
> + * because nfs4_file's are hashed by the address in the fi_inode
> + * field.
> + */
> +static noinline_for_stack struct nfs4_file *
> +nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
> +{
> +	unsigned int hashval = file_hashval(fhp);
>  	struct nfs4_file *ret = NULL;
>  	bool alias_found = false;
> +	struct nfs4_file *fi;
>  
>  	spin_lock(&state_lock);
> -	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
> +	hlist_for_each_entry_rcu(fi, &file_hashtbl[hashval], fi_hash,
>  				 lockdep_is_held(&state_lock)) {
> -		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
> -			if (refcount_inc_not_zero(&fp->fi_ref))
> -				ret = fp;
> -		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
> -			fp->fi_aliased = alias_found = true;
> +		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
> +			if (refcount_inc_not_zero(&fi->fi_ref))
> +				ret = fi;
> +		} else if (d_inode(fhp->fh_dentry) == fi->fi_inode)
> +			fi->fi_aliased = alias_found = true;

Would it not be better to do the inode comparison first? That's just a
pointer check vs. a full memcmp. That would allow you to quickly rule
out any entries that match different inodes but that are on the same
hash chain.

>  	}
>  	if (likely(ret == NULL)) {
> -		nfsd4_file_init(fh, new);
> +		nfsd4_file_init(fhp, new);
>  		hlist_add_head_rcu(&new->fi_hash, &file_hashtbl[hashval]);
>  		new->fi_aliased = alias_found;
>  		ret = new;
> @@ -4709,32 +4727,6 @@ static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
>  	return ret;
>  }
>  
> -static struct nfs4_file * find_file(struct svc_fh *fh)
> -{
> -	struct nfs4_file *fp;
> -	unsigned int hashval = file_hashval(fh);
> -
> -	rcu_read_lock();
> -	fp = find_file_locked(fh, hashval);
> -	rcu_read_unlock();
> -	return fp;
> -}
> -
> -static struct nfs4_file *
> -find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
> -{
> -	struct nfs4_file *fp;
> -	unsigned int hashval = file_hashval(fh);
> -
> -	rcu_read_lock();
> -	fp = find_file_locked(fh, hashval);
> -	rcu_read_unlock();
> -	if (fp)
> -		return fp;
> -
> -	return insert_file(new, fh, hashval);
> -}
> -
>  static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
>  {
>  	hlist_del_rcu(&fi->fi_hash);
> @@ -5625,7 +5617,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
>  	 * and check for delegations in the process of being recalled.
>  	 * If not found, create the nfs4_file struct
>  	 */
> -	fp = find_or_add_file(open->op_file, current_fh);
> +	fp = nfsd4_file_hash_insert(open->op_file, current_fh);
>  	if (fp != open->op_file) {
>  		status = nfs4_check_deleg(cl, open, &dp);
>  		if (status)

-- 
Jeff Layton <jlayton@kernel.org>
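Concretely, the reordering Jeff is suggesting would look something like
the sketch below. This is for illustration only and was not part of the
posted series; it assumes, as Jeff's comment does, that two entries
whose filehandles match must share an inode, so the pointer test can
act as a cheap pre-filter before fh_match()'s memcmp of the filehandle
bytes.

	/*
	 * Illustrative sketch of the suggested reordering: rule out
	 * entries that belong to other inodes with a pointer compare
	 * before paying for fh_match()'s memcmp.
	 */
	hlist_for_each_entry_rcu(fi, &file_hashtbl[hashval], fi_hash,
				 lockdep_is_held(&state_lock)) {
		if (d_inode(fhp->fh_dentry) != fi->fi_inode)
			continue;	/* different inode: neither match nor alias */
		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
			if (refcount_inc_not_zero(&fi->fi_ref))
				ret = fi;
		} else {
			/* same inode, different filehandle: an alias */
			fi->fi_aliased = alias_found = true;
		}
	}

The trade-off is one extra branch on matching entries in exchange for
skipping the memcmp on every non-matching entry in the chain.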
> On Oct 31, 2022, at 12:43 PM, Jeff Layton <jlayton@kernel.org> wrote:
> 
> On Fri, 2022-10-28 at 10:47 -0400, Chuck Lever wrote:
>> Remove the call to find_file_locked() in insert_nfs4_file(). Tracing
>> shows that over 99% of these calls return NULL. Thus it is not worth
>> the expense of the extra bucket list traversal. insert_file() already
>> deals correctly with the case where the item is already in the hash
>> bucket.
>> 
>> [...]
>> 
>> +		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
>> +			if (refcount_inc_not_zero(&fi->fi_ref))
>> +				ret = fi;
>> +		} else if (d_inode(fhp->fh_dentry) == fi->fi_inode)
>> +			fi->fi_aliased = alias_found = true;
> 
> Would it not be better to do the inode comparison first? That's just a
> pointer check vs. a full memcmp. That would allow you to quickly rule
> out any entries that match different inodes but that are on the same
> hash chain.

Marginally better: The usual case after the rhltable changes are
applied is that the returned list contains only entries that match
d_inode(fhp->fh_dentry), and usually that list has just one entry
in it that matches the FH.

Since 11/14 is billed as a clean-up, I'd prefer to put that kind
of optimization in a separate patch that can be mechanically
reverted if need be. I'll post something that goes on top of this
series.

-- 
Chuck Lever
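A side note on the refcount_inc_not_zero() call that both versions of
the loop rely on: an nfs4_file can still be visible in the bucket after
its last reference has been dropped, while it awaits removal and an RCU
grace period, so a bare refcount_inc() would be unsafe there. In
general form (an illustrative pattern, not nfsd code; lookup_bucket()
is a hypothetical helper):

	rcu_read_lock();
	obj = lookup_bucket(key);	/* hypothetical helper */
	if (obj && !refcount_inc_not_zero(&obj->ref))
		obj = NULL;	/* refcount already hit zero: treat as not found */
	rcu_read_unlock();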
On Mon, 2022-10-31 at 17:28 +0000, Chuck Lever III wrote:
> 
>> On Oct 31, 2022, at 12:43 PM, Jeff Layton <jlayton@kernel.org> wrote:
>> 
>> [...]
>> 
>> Would it not be better to do the inode comparison first? That's just a
>> pointer check vs. a full memcmp. That would allow you to quickly rule
>> out any entries that match different inodes but that are on the same
>> hash chain.
> 
> Marginally better: The usual case after the rhltable changes are
> applied is that the returned list contains only entries that match
> d_inode(fhp->fh_dentry), and usually that list has just one entry
> in it that matches the FH.

That depends entirely on workload. Given that you start with 512
buckets, you'd need to be working with at least that many inodes
concurrently to make that happen, but it could easily happen with a
large enough working set.

> Since 11/14 is billed as a clean-up, I'd prefer to put that kind
> of optimization in a separate patch that can be mechanically
> reverted if need be. I'll post something that goes on top of this
> series.

Fair enough. You can add my Reviewed-by: and we can address it later.

-- 
Jeff Layton <jlayton@kernel.org>
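For background on the arithmetic in Jeff's reply: the table is keyed by
the inode pointer, so every nfs4_file for a given inode lands in the
same bucket, and with uniform hashing the expected chain walked per
insertion is roughly the number of concurrently tracked inodes divided
by the bucket count. A sketch of that keying (constants and details
are illustrative, not necessarily the in-tree values; 512 buckets is
the figure Jeff cites):

	#include <linux/hash.h>

	#define FILE_HASH_BITS	9		/* 2^9 = 512 buckets */
	#define FILE_HASH_SIZE	(1 << FILE_HASH_BITS)

	static unsigned int file_hashval(const struct svc_fh *fh)
	{
		struct inode *inode = d_inode(fh->fh_dentry);

		/* Aliases of one inode all hash to one bucket. */
		return (unsigned int)hash_ptr(inode, FILE_HASH_BITS);
	}

At 512 buckets, a working set of 50,000 open inodes would mean chains
of roughly 100 entries walked per insertion, which is the kind of
growth a resizable table avoids.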
> On Oct 31, 2022, at 1:36 PM, Jeff Layton <jlayton@kernel.org> wrote:
> 
> On Mon, 2022-10-31 at 17:28 +0000, Chuck Lever III wrote:
>> 
>>> On Oct 31, 2022, at 12:43 PM, Jeff Layton <jlayton@kernel.org> wrote:
>>> 
>>> [...]
>>> 
>>> Would it not be better to do the inode comparison first? That's just a
>>> pointer check vs. a full memcmp. That would allow you to quickly rule
>>> out any entries that match different inodes but that are on the same
>>> hash chain.
>> 
>> Marginally better: The usual case after the rhltable changes are
>> applied is that the returned list contains only entries that match
>> d_inode(fhp->fh_dentry), and usually that list has just one entry
>> in it that matches the FH.
> 
> That depends entirely on workload. Given that you start with 512
> buckets, you'd need to be working with at least that many inodes
> concurrently to make that happen, but it could easily happen with a
> large enough working set.

I take it back. Neil pointed this out during a recent review: the
rhltable_lookup() will return a list that contains _only_ items that
match the inode. So that check is no longer needed at all.

Patch 14/14 has this hunk:

+	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
+			       nfs4_file_rhash_params);
+	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
 		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
 			if (refcount_inc_not_zero(&fi->fi_ref))
 				ret = fi;
-		} else if (d_inode(fhp->fh_dentry) == fi->fi_inode)
+		} else
 			fi->fi_aliased = alias_found = true;
 	}

which removes that check.

>> Since 11/14 is billed as a clean-up, I'd prefer to put that kind
>> of optimization in a separate patch that can be mechanically
>> reverted if need be. I'll post something that goes on top of this
>> series.
> 
> Fair enough. You can add my Reviewed-by: and we can address it later.

-- 
Chuck Lever
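The behavior Chuck describes falls out of how an rhltable is keyed:
patch 14/14 keys the table on fi_inode, so rhltable_lookup() by inode
address returns a list containing only that inode's nfs4_files, and
the else-if becomes a plain else. The parameter block would look
roughly like this (a reconstruction for illustration; the identifier
names come from the hunk above, the exact fields are an assumption):

	/* Illustrative rhltable keying: the key is the fi_inode
	 * pointer, so a lookup returns only entries aliasing that
	 * inode. */
	static const struct rhashtable_params nfs4_file_rhash_params = {
		.key_len		= sizeof_field(struct nfs4_file, fi_inode),
		.key_offset		= offsetof(struct nfs4_file, fi_inode),
		.head_offset		= offsetof(struct nfs4_file, fi_rlist),
		.automatic_shrinking	= true,
	};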
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 504455cb80e9..b1988a46fb9b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4683,24 +4683,42 @@ find_file_locked(const struct svc_fh *fh, unsigned int hashval)
 	return NULL;
 }
 
-static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
-				     unsigned int hashval)
+static struct nfs4_file * find_file(struct svc_fh *fh)
 {
 	struct nfs4_file *fp;
+	unsigned int hashval = file_hashval(fh);
+
+	rcu_read_lock();
+	fp = find_file_locked(fh, hashval);
+	rcu_read_unlock();
+	return fp;
+}
+
+/*
+ * On hash insertion, identify entries with the same inode but
+ * distinct filehandles. They will all be in the same hash bucket
+ * because nfs4_file's are hashed by the address in the fi_inode
+ * field.
+ */
+static noinline_for_stack struct nfs4_file *
+nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
+{
+	unsigned int hashval = file_hashval(fhp);
 	struct nfs4_file *ret = NULL;
 	bool alias_found = false;
+	struct nfs4_file *fi;
 
 	spin_lock(&state_lock);
-	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
+	hlist_for_each_entry_rcu(fi, &file_hashtbl[hashval], fi_hash,
 				 lockdep_is_held(&state_lock)) {
-		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
-			if (refcount_inc_not_zero(&fp->fi_ref))
-				ret = fp;
-		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
-			fp->fi_aliased = alias_found = true;
+		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
+			if (refcount_inc_not_zero(&fi->fi_ref))
+				ret = fi;
+		} else if (d_inode(fhp->fh_dentry) == fi->fi_inode)
+			fi->fi_aliased = alias_found = true;
 	}
 	if (likely(ret == NULL)) {
-		nfsd4_file_init(fh, new);
+		nfsd4_file_init(fhp, new);
 		hlist_add_head_rcu(&new->fi_hash, &file_hashtbl[hashval]);
 		new->fi_aliased = alias_found;
 		ret = new;
@@ -4709,32 +4727,6 @@ static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
 	return ret;
 }
 
-static struct nfs4_file * find_file(struct svc_fh *fh)
-{
-	struct nfs4_file *fp;
-	unsigned int hashval = file_hashval(fh);
-
-	rcu_read_lock();
-	fp = find_file_locked(fh, hashval);
-	rcu_read_unlock();
-	return fp;
-}
-
-static struct nfs4_file *
-find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
-{
-	struct nfs4_file *fp;
-	unsigned int hashval = file_hashval(fh);
-
-	rcu_read_lock();
-	fp = find_file_locked(fh, hashval);
-	rcu_read_unlock();
-	if (fp)
-		return fp;
-
-	return insert_file(new, fh, hashval);
-}
-
 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
 {
 	hlist_del_rcu(&fi->fi_hash);
@@ -5625,7 +5617,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	 * and check for delegations in the process of being recalled.
 	 * If not found, create the nfs4_file struct
 	 */
-	fp = find_or_add_file(open->op_file, current_fh);
+	fp = nfsd4_file_hash_insert(open->op_file, current_fh);
 	if (fp != open->op_file) {
 		status = nfs4_check_deleg(cl, open, &dp);
 		if (status)
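To restate the contract the final hunk depends on: after this patch,
nfsd4_file_hash_insert() is a combined find-or-create, and the caller
tells the two outcomes apart by pointer identity with its preallocated
nfs4_file. A paraphrase of the nfsd4_process_open2() hunk above, not
new code:

	fp = nfsd4_file_hash_insert(open->op_file, current_fh);
	if (fp != open->op_file) {
		/* An existing, referenced nfs4_file matched the
		 * filehandle: check for delegations being recalled. */
		status = nfs4_check_deleg(cl, open, &dp);
		...
	}
	/* Otherwise open->op_file itself was hashed and returned. */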