Message ID | 20230720125806.1385279-1-aahringo@redhat.com
---|---
State | New, archived
Series | [RFC,v6.5-rc2,1/3] fs: lockd: nlm_blocked list race fixes
On Thu, 2023-07-20 at 08:58 -0400, Alexander Aring wrote:
> This patch fixes races when lockd accesses the global nlm_blocked
> list. It was mostly safe to access the list because everything was
> accessed from the lockd kernel thread context, but there are cases,
> such as nlmsvc_grant_deferred(), that can manipulate the nlm_blocked
> list and be called from any context.
> 
> Cc: stable@vger.kernel.org
> Signed-off-by: Alexander Aring <aahringo@redhat.com>
> ---
>  fs/lockd/svclock.c | 13 ++++++++++++-
>  1 file changed, 12 insertions(+), 1 deletion(-)
> 
> diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
> index c43ccdf28ed9..28abec5c451d 100644
> --- a/fs/lockd/svclock.c
> +++ b/fs/lockd/svclock.c
> @@ -131,12 +131,14 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
>  static inline void
>  nlmsvc_remove_block(struct nlm_block *block)
>  {
> +	spin_lock(&nlm_blocked_lock);
>  	if (!list_empty(&block->b_list)) {
> -		spin_lock(&nlm_blocked_lock);
>  		list_del_init(&block->b_list);
>  		spin_unlock(&nlm_blocked_lock);
>  		nlmsvc_release_block(block);
> +		return;
>  	}
> +	spin_unlock(&nlm_blocked_lock);
>  }
> 
>  /*
> @@ -152,6 +154,7 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
>  			file, lock->fl.fl_pid,
>  			(long long)lock->fl.fl_start,
>  			(long long)lock->fl.fl_end, lock->fl.fl_type);
> +	spin_lock(&nlm_blocked_lock);
>  	list_for_each_entry(block, &nlm_blocked, b_list) {
>  		fl = &block->b_call->a_args.lock.fl;
>  		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
> @@ -161,9 +164,11 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
>  			nlmdbg_cookie2a(&block->b_call->a_args.cookie));
>  		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
>  			kref_get(&block->b_count);
> +			spin_unlock(&nlm_blocked_lock);
>  			return block;
>  		}
>  	}
> +	spin_unlock(&nlm_blocked_lock);
> 
>  	return NULL;
>  }
> @@ -185,16 +190,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie)
>  {
>  	struct nlm_block	*block;
> 
> +	spin_lock(&nlm_blocked_lock);
>  	list_for_each_entry(block, &nlm_blocked, b_list) {
>  		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
>  			goto found;
>  	}
> +	spin_unlock(&nlm_blocked_lock);
> 
>  	return NULL;
> 
>  found:
>  	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
>  	kref_get(&block->b_count);
> +	spin_unlock(&nlm_blocked_lock);
>  	return block;
>  }
> 
> @@ -317,6 +325,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
> 
>  restart:
>  	mutex_lock(&file->f_mutex);
> +	spin_lock(&nlm_blocked_lock);
>  	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
>  		if (!match(block->b_host, host))
>  			continue;
> @@ -325,11 +334,13 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
>  		if (list_empty(&block->b_list))
>  			continue;
>  		kref_get(&block->b_count);
> +		spin_unlock(&nlm_blocked_lock);
>  		mutex_unlock(&file->f_mutex);
>  		nlmsvc_unlink_block(block);
>  		nlmsvc_release_block(block);
>  		goto restart;
>  	}
> +	spin_unlock(&nlm_blocked_lock);
>  	mutex_unlock(&file->f_mutex);
>  }

The patch itself looks correct. Walking these lists without holding
the lock is quite suspicious. Not sure about the stable designation
here though, unless you have a way to easily reproduce this.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
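For context on the bug class Jeff is pointing at: the old
nlmsvc_remove_block() tested list_empty() before taking
nlm_blocked_lock, a classic check-then-act race. Below is a minimal
user-space sketch of the *fixed* pattern; it is illustrative only,
not lockd code — node_on_list()/node_del_init() stand in for the
kernel's list_empty()/list_del_init(), and a pthread mutex stands in
for nlm_blocked_lock.

	/* Sketch (assumptions noted above): the emptiness test and the
	 * deletion form one critical section, so only one caller can
	 * observe "on list" and win the removal.  With the test outside
	 * the lock (as in the old code), two threads could both see the
	 * node on the list and both drop the reference that list
	 * membership represents.
	 */
	#include <pthread.h>
	#include <stdbool.h>

	struct node { struct node *prev, *next; };

	static pthread_mutex_t blocked_lock = PTHREAD_MUTEX_INITIALIZER;

	static bool node_on_list(const struct node *n)
	{
		return n->next != n;		/* !list_empty() analogue */
	}

	static void node_del_init(struct node *n)
	{
		n->next->prev = n->prev;
		n->prev->next = n->next;
		n->prev = n->next = n;		/* list_del_init() analogue */
	}

	static bool remove_block(struct node *n)
	{
		bool removed = false;

		pthread_mutex_lock(&blocked_lock);
		if (node_on_list(n)) {		/* check and act under one lock */
			node_del_init(n);
			removed = true;
		}
		pthread_mutex_unlock(&blocked_lock);

		return removed;			/* caller drops its ref iff true */
	}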
On Thu, Jul 20, 2023 at 08:58:04AM -0400, Alexander Aring wrote:
> This patch fixes races when lockd accesses the global nlm_blocked
> list. It was mostly safe to access the list because everything was
> accessed from the lockd kernel thread context, but there are cases,
> such as nlmsvc_grant_deferred(), that can manipulate the nlm_blocked
> list and be called from any context.
> 
> Cc: stable@vger.kernel.org
> Signed-off-by: Alexander Aring <aahringo@redhat.com>

I agree with Jeff, this one looks fine to apply to nfsd-next. I've
done that so it can get test exposure while we consider 2/3 and 3/3.

I've dropped the "Cc: stable" tag -- since there is no specific bug
report this fix addresses, I will defer the decision about backporting
at least until we have some test experience.

> ---
>  fs/lockd/svclock.c | 13 ++++++++++++-
>  1 file changed, 12 insertions(+), 1 deletion(-)
> 
> diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
> index c43ccdf28ed9..28abec5c451d 100644
> --- a/fs/lockd/svclock.c
> +++ b/fs/lockd/svclock.c
> @@ -131,12 +131,14 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
>  static inline void
>  nlmsvc_remove_block(struct nlm_block *block)
>  {
> +	spin_lock(&nlm_blocked_lock);
>  	if (!list_empty(&block->b_list)) {
> -		spin_lock(&nlm_blocked_lock);
>  		list_del_init(&block->b_list);
>  		spin_unlock(&nlm_blocked_lock);
>  		nlmsvc_release_block(block);
> +		return;
>  	}
> +	spin_unlock(&nlm_blocked_lock);
>  }
> 
>  /*
> @@ -152,6 +154,7 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
>  			file, lock->fl.fl_pid,
>  			(long long)lock->fl.fl_start,
>  			(long long)lock->fl.fl_end, lock->fl.fl_type);
> +	spin_lock(&nlm_blocked_lock);
>  	list_for_each_entry(block, &nlm_blocked, b_list) {
>  		fl = &block->b_call->a_args.lock.fl;
>  		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
> @@ -161,9 +164,11 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
>  			nlmdbg_cookie2a(&block->b_call->a_args.cookie));
>  		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
>  			kref_get(&block->b_count);
> +			spin_unlock(&nlm_blocked_lock);
>  			return block;
>  		}
>  	}
> +	spin_unlock(&nlm_blocked_lock);
> 
>  	return NULL;
>  }
> @@ -185,16 +190,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie)
>  {
>  	struct nlm_block	*block;
> 
> +	spin_lock(&nlm_blocked_lock);
>  	list_for_each_entry(block, &nlm_blocked, b_list) {
>  		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
>  			goto found;
>  	}
> +	spin_unlock(&nlm_blocked_lock);
> 
>  	return NULL;
> 
>  found:
>  	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
>  	kref_get(&block->b_count);
> +	spin_unlock(&nlm_blocked_lock);
>  	return block;
>  }
> 
> @@ -317,6 +325,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
> 
>  restart:
>  	mutex_lock(&file->f_mutex);
> +	spin_lock(&nlm_blocked_lock);
>  	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
>  		if (!match(block->b_host, host))
>  			continue;
> @@ -325,11 +334,13 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
>  		if (list_empty(&block->b_list))
>  			continue;
>  		kref_get(&block->b_count);
> +		spin_unlock(&nlm_blocked_lock);
>  		mutex_unlock(&file->f_mutex);
>  		nlmsvc_unlink_block(block);
>  		nlmsvc_release_block(block);
>  		goto restart;
>  	}
> +	spin_unlock(&nlm_blocked_lock);
>  	mutex_unlock(&file->f_mutex);
>  }
> 
> -- 
> 2.31.1
> 
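The same locking discipline shows up in the patch's lookup paths
(nlmsvc_lookup_block() and nlmsvc_find_block()): the reference must be
taken *before* the lock is dropped, or a concurrent remover could free
the block between the unlock and the kref_get(). A hedged user-space
sketch of that idiom follows; the types and helpers are illustrative
stand-ins, not lockd's (a singly linked list and atomic_int replace
the kernel list and kref for brevity).

	/* Sketch: walk the list under the lock and pin the match while
	 * still holding the lock, so the returned entry stays valid
	 * even if another thread unlinks it right after we unlock.
	 */
	#include <pthread.h>
	#include <stdatomic.h>

	struct block {
		struct block *next;		/* singly linked for brevity */
		atomic_int refcount;		/* kref analogue */
		int cookie;
	};

	static pthread_mutex_t blocked_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct block *blocked_head;

	static struct block *find_block(int cookie)
	{
		struct block *b;

		pthread_mutex_lock(&blocked_lock);
		for (b = blocked_head; b; b = b->next) {
			if (b->cookie == cookie) {
				/* Take the reference under the lock:
				 * after the unlock, a concurrent
				 * remover may drop the list's ref.
				 */
				atomic_fetch_add(&b->refcount, 1);
				break;
			}
		}
		pthread_mutex_unlock(&blocked_lock);

		return b;			/* NULL if not found */
	}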
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c43ccdf28ed9..28abec5c451d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -131,12 +131,14 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
 static inline void
 nlmsvc_remove_block(struct nlm_block *block)
 {
+	spin_lock(&nlm_blocked_lock);
 	if (!list_empty(&block->b_list)) {
-		spin_lock(&nlm_blocked_lock);
 		list_del_init(&block->b_list);
 		spin_unlock(&nlm_blocked_lock);
 		nlmsvc_release_block(block);
+		return;
 	}
+	spin_unlock(&nlm_blocked_lock);
 }
 
 /*
@@ -152,6 +154,7 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
 			file, lock->fl.fl_pid,
 			(long long)lock->fl.fl_start,
 			(long long)lock->fl.fl_end, lock->fl.fl_type);
+	spin_lock(&nlm_blocked_lock);
 	list_for_each_entry(block, &nlm_blocked, b_list) {
 		fl = &block->b_call->a_args.lock.fl;
 		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
@@ -161,9 +164,11 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
 			nlmdbg_cookie2a(&block->b_call->a_args.cookie));
 		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
 			kref_get(&block->b_count);
+			spin_unlock(&nlm_blocked_lock);
 			return block;
 		}
 	}
+	spin_unlock(&nlm_blocked_lock);
 
 	return NULL;
 }
@@ -185,16 +190,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie)
 {
 	struct nlm_block	*block;
 
+	spin_lock(&nlm_blocked_lock);
 	list_for_each_entry(block, &nlm_blocked, b_list) {
 		if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
 			goto found;
 	}
+	spin_unlock(&nlm_blocked_lock);
 
 	return NULL;
 
 found:
 	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
 	kref_get(&block->b_count);
+	spin_unlock(&nlm_blocked_lock);
 	return block;
 }
 
@@ -317,6 +325,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
 
 restart:
 	mutex_lock(&file->f_mutex);
+	spin_lock(&nlm_blocked_lock);
 	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
 		if (!match(block->b_host, host))
 			continue;
@@ -325,11 +334,13 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
 		if (list_empty(&block->b_list))
 			continue;
 		kref_get(&block->b_count);
+		spin_unlock(&nlm_blocked_lock);
 		mutex_unlock(&file->f_mutex);
 		nlmsvc_unlink_block(block);
 		nlmsvc_release_block(block);
 		goto restart;
 	}
+	spin_unlock(&nlm_blocked_lock);
 	mutex_unlock(&file->f_mutex);
 }
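One subtlety in the nlmsvc_traverse_blocks() hunks above:
nlmsvc_unlink_block() itself takes nlm_blocked_lock, so the patch must
drop both the spinlock and the mutex before calling it, and once the
locks are dropped the list may have changed, which is why the walk
restarts from the head. A sketch of that drop-and-restart idiom,
again in illustrative user-space terms (file_mutex plays
file->f_mutex, list_lock plays nlm_blocked_lock, entry_unlink() plays
nlmsvc_unlink_block(); none of this is the lockd code):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct entry {
		struct entry *next;
		atomic_int refcount;
		int host_id;
	};

	static pthread_mutex_t file_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct entry *entries;

	static void entry_get(struct entry *e)
	{
		atomic_fetch_add(&e->refcount, 1);
	}

	static void entry_put(struct entry *e)
	{
		if (atomic_fetch_sub(&e->refcount, 1) == 1)
			free(e);
	}

	/* Takes list_lock internally, so callers must not hold it. */
	static void entry_unlink(struct entry *e)
	{
		struct entry **pp;

		pthread_mutex_lock(&list_lock);
		for (pp = &entries; *pp; pp = &(*pp)->next) {
			if (*pp == e) {
				*pp = e->next;
				entry_put(e);	/* drop the list's ref */
				break;
			}
		}
		pthread_mutex_unlock(&list_lock);
	}

	static void purge_host(int host_id)
	{
		struct entry *e;

	restart:
		pthread_mutex_lock(&file_mutex);
		pthread_mutex_lock(&list_lock);
		for (e = entries; e; e = e->next) {
			if (e->host_id != host_id)
				continue;
			/* Pin the entry, then drop both locks in
			 * reverse acquisition order before calling a
			 * helper that takes list_lock itself.
			 */
			entry_get(e);
			pthread_mutex_unlock(&list_lock);
			pthread_mutex_unlock(&file_mutex);
			entry_unlink(e);
			entry_put(e);
			/* The list may have changed while unlocked. */
			goto restart;
		}
		pthread_mutex_unlock(&list_lock);
		pthread_mutex_unlock(&file_mutex);
	}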
This patch fixes races when lockd accesses the global nlm_blocked
list. It was mostly safe to access the list because everything was
accessed from the lockd kernel thread context, but there are cases,
such as nlmsvc_grant_deferred(), that can manipulate the nlm_blocked
list and be called from any context.

Cc: stable@vger.kernel.org
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
 fs/lockd/svclock.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)