diff mbox series

[3/4] nfsd: make nfsd4_run_cb a bool return function

Message ID 20220926163847.47558-4-jlayton@kernel.org (mailing list archive)
State New, archived
Headers show
Series nfsd: minor cleanup and sanity check patches for nfsd | expand

Commit Message

Jeff Layton Sept. 26, 2022, 4:38 p.m. UTC
...and mirror the semantics of queue_work. Also, queueing a delegation
recall should always succeed, so WARN if one ever doesn't.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/nfsd/nfs4callback.c | 14 ++++++++++++--
 fs/nfsd/nfs4state.c    |  5 ++---
 fs/nfsd/state.h        |  2 +-
 3 files changed, 15 insertions(+), 6 deletions(-)

Comments

Chuck Lever Sept. 26, 2022, 5:51 p.m. UTC | #1
> On Sep 26, 2022, at 12:38 PM, Jeff Layton <jlayton@kernel.org> wrote:
> 
> ...and mirror the semantics of queue_work. Also, queueing a delegation
> recall should always succeed when queueing, so WARN if one ever doesn't.

The description is not especially clear. It seems like the point
of this patch is really the part after the "Also, ..." Otherwise,
I'm not getting why this change is really needed?

Maybe a tracepoint would be less alarming to users/administrators
than would a WARN with a full stack trace? The kdoc comment you
added (thank you!) suggests there are times when it is OK for the
nfsd4_queue_cb() to fail.


> Signed-off-by: Jeff Layton <jlayton@kernel.org>
> ---
> fs/nfsd/nfs4callback.c | 14 ++++++++++++--
> fs/nfsd/nfs4state.c    |  5 ++---
> fs/nfsd/state.h        |  2 +-
> 3 files changed, 15 insertions(+), 6 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> index 4ce328209f61..ba904614ebf5 100644
> --- a/fs/nfsd/nfs4callback.c
> +++ b/fs/nfsd/nfs4callback.c
> @@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> 	cb->cb_holds_slot = false;
> }
> 
> -void nfsd4_run_cb(struct nfsd4_callback *cb)
> +/**
> + * nfsd4_run_cb - queue up a callback job to run
> + * @cb: callback to queue
> + *
> + * Kick off a callback to do its thing. Returns false if it was already
> + * queued or running, true otherwise.
> + */
> +bool nfsd4_run_cb(struct nfsd4_callback *cb)
> {
> +	bool queued;
> 	struct nfs4_client *clp = cb->cb_clp;

Reverse christmas tree, please.


> 
> 	nfsd41_cb_inflight_begin(clp);
> -	if (!nfsd4_queue_cb(cb))
> +	queued = nfsd4_queue_cb(cb);
> +	if (!queued)
> 		nfsd41_cb_inflight_end(clp);
> +	return queued;
> }
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index 211f1af1cfb3..90533f43fea9 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
> 	 * we know it's safe to take a reference.
> 	 */
> 	refcount_inc(&dp->dl_stid.sc_count);
> -	nfsd4_run_cb(&dp->dl_recall);
> +	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
> }
> 
> /* Called from break_lease() with flc_lock held. */
> static bool
> nfsd_break_deleg_cb(struct file_lock *fl)
> {
> -	bool ret = false;
> 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
> 	struct nfs4_file *fp = dp->dl_stid.sc_file;
> 	struct nfs4_client *clp = dp->dl_stid.sc_client;
> @@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
> 	fp->fi_had_conflict = true;
> 	nfsd_break_one_deleg(dp);
> 	spin_unlock(&fp->fi_lock);
> -	return ret;
> +	return false;
> }
> 
> /**
> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> index b3477087a9fc..e2daef3cc003 100644
> --- a/fs/nfsd/state.h
> +++ b/fs/nfsd/state.h
> @@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
> extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
> extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
> -extern void nfsd4_run_cb(struct nfsd4_callback *cb);
> +extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
> extern int nfsd4_create_callback_queue(void);
> extern void nfsd4_destroy_callback_queue(void);
> extern void nfsd4_shutdown_callback(struct nfs4_client *);
> -- 
> 2.37.3
> 

--
Chuck Lever
Jeff Layton Sept. 26, 2022, 6:24 p.m. UTC | #2
On Mon, 2022-09-26 at 17:51 +0000, Chuck Lever III wrote:
> 
> > On Sep 26, 2022, at 12:38 PM, Jeff Layton <jlayton@kernel.org> wrote:
> > 
> > ...and mirror the semantics of queue_work. Also, queueing a delegation
> > recall should always succeed when queueing, so WARN if one ever doesn't.
> 
> The description is not especially clear. It seems like the point
> of this patch is really the part after the "Also, ..." Otherwise,
> I'm not getting why this change is really needed?
> 
> Maybe a tracepoint would be less alarming to users/administrators
> than would a WARN with a full stack trace? The kdoc comment you
> added (thank you!) suggests there are times when it is OK for the
> nfsd4_queue_cb() to fail.
> 
> 

Yeah, that's pretty much the case with the "also". There may be some
other cases where we ought to catch this sort of thing too. We'd likely
never see a tracepoint for this. We'd just notice that there was a
refcount leak.

queue_work can return false without queueing anything if the work is
already queued. In this case, we will have taken an extra reference to
the stid that will never get put. I think this should never happen
because of the flc_lock, but it'd be good to catch it if it does.

That said, I don't feel that strongly about the patch, so if you think
it's not worthwhile, I'm fine with holding off on it.

> > Signed-off-by: Jeff Layton <jlayton@kernel.org>
> > ---
> > fs/nfsd/nfs4callback.c | 14 ++++++++++++--
> > fs/nfsd/nfs4state.c    |  5 ++---
> > fs/nfsd/state.h        |  2 +-
> > 3 files changed, 15 insertions(+), 6 deletions(-)
> > 
> > diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> > index 4ce328209f61..ba904614ebf5 100644
> > --- a/fs/nfsd/nfs4callback.c
> > +++ b/fs/nfsd/nfs4callback.c
> > @@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> > 	cb->cb_holds_slot = false;
> > }
> > 
> > -void nfsd4_run_cb(struct nfsd4_callback *cb)
> > +/**
> > + * nfsd4_run_cb - queue up a callback job to run
> > + * @cb: callback to queue
> > + *
> > + * Kick off a callback to do its thing. Returns false if it was already
> > + * queued or running, true otherwise.
> > + */
> > +bool nfsd4_run_cb(struct nfsd4_callback *cb)
> > {
> > +	bool queued;
> > 	struct nfs4_client *clp = cb->cb_clp;
> 
> Reverse christmas tree, please.
> 
> 
> > 
> > 	nfsd41_cb_inflight_begin(clp);
> > -	if (!nfsd4_queue_cb(cb))
> > +	queued = nfsd4_queue_cb(cb);
> > +	if (!queued)
> > 		nfsd41_cb_inflight_end(clp);
> > +	return queued;
> > }
> > diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> > index 211f1af1cfb3..90533f43fea9 100644
> > --- a/fs/nfsd/nfs4state.c
> > +++ b/fs/nfsd/nfs4state.c
> > @@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
> > 	 * we know it's safe to take a reference.
> > 	 */
> > 	refcount_inc(&dp->dl_stid.sc_count);
> > -	nfsd4_run_cb(&dp->dl_recall);
> > +	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
> > }
> > 
> > /* Called from break_lease() with flc_lock held. */
> > static bool
> > nfsd_break_deleg_cb(struct file_lock *fl)
> > {
> > -	bool ret = false;
> > 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
> > 	struct nfs4_file *fp = dp->dl_stid.sc_file;
> > 	struct nfs4_client *clp = dp->dl_stid.sc_client;
> > @@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
> > 	fp->fi_had_conflict = true;
> > 	nfsd_break_one_deleg(dp);
> > 	spin_unlock(&fp->fi_lock);
> > -	return ret;
> > +	return false;
> > }
> > 
> > /**
> > diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> > index b3477087a9fc..e2daef3cc003 100644
> > --- a/fs/nfsd/state.h
> > +++ b/fs/nfsd/state.h
> > @@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
> > extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
> > extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> > 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
> > -extern void nfsd4_run_cb(struct nfsd4_callback *cb);
> > +extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
> > extern int nfsd4_create_callback_queue(void);
> > extern void nfsd4_destroy_callback_queue(void);
> > extern void nfsd4_shutdown_callback(struct nfs4_client *);
> > -- 
> > 2.37.3
> > 
> 
> --
> Chuck Lever
> 
> 
>
Chuck Lever Sept. 26, 2022, 6:31 p.m. UTC | #3
> On Sep 26, 2022, at 2:24 PM, Jeff Layton <jlayton@kernel.org> wrote:
> 
> On Mon, 2022-09-26 at 17:51 +0000, Chuck Lever III wrote:
>> 
>>> On Sep 26, 2022, at 12:38 PM, Jeff Layton <jlayton@kernel.org> wrote:
>>> 
>>> ...and mirror the semantics of queue_work. Also, queueing a delegation
>>> recall should always succeed when queueing, so WARN if one ever doesn't.
>> 
>> The description is not especially clear. It seems like the point
>> of this patch is really the part after the "Also, ..." Otherwise,
>> I'm not getting why this change is really needed?
>> 
>> Maybe a tracepoint would be less alarming to users/administrators
>> than would a WARN with a full stack trace? The kdoc comment you
>> added (thank you!) suggests there are times when it is OK for the
>> nfsd4_queue_cb() to fail.
>> 
>> 
> 
> Yeah, that's pretty much the case with the "also". There may be some
> other cases where we ought to catch this sort of thing too. We'd likely
> never see a tracepoint for this. We'd just notice that there was a
> refcount leak.
> 
> queue_work can return false without queueing anything if the work is
> already queued. In this case, we will have taken an extra reference to
> the stid that will never get put. I think this should never happen
> because of the flc_lock, but it'd be good to catch it if it does.

I've pushed Dai's v3 SSC fix and the first two patches in this series
to the nfsd for-next tree. Can you send me 3/4 and 4/4 again but 

 - Include the above text in the description of 3/4, and

 - rebase 4/4 on top of the latest nfsd for-next

Thanks!


> That said, I don't feel that strongly about the patch, so if you think
> it's not worthwhile, I'm fine with holding off on it.
> 
>>> Signed-off-by: Jeff Layton <jlayton@kernel.org>
>>> ---
>>> fs/nfsd/nfs4callback.c | 14 ++++++++++++--
>>> fs/nfsd/nfs4state.c    |  5 ++---
>>> fs/nfsd/state.h        |  2 +-
>>> 3 files changed, 15 insertions(+), 6 deletions(-)
>>> 
>>> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
>>> index 4ce328209f61..ba904614ebf5 100644
>>> --- a/fs/nfsd/nfs4callback.c
>>> +++ b/fs/nfsd/nfs4callback.c
>>> @@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
>>> 	cb->cb_holds_slot = false;
>>> }
>>> 
>>> -void nfsd4_run_cb(struct nfsd4_callback *cb)
>>> +/**
>>> + * nfsd4_run_cb - queue up a callback job to run
>>> + * @cb: callback to queue
>>> + *
>>> + * Kick off a callback to do its thing. Returns false if it was already
>>> + * queued or running, true otherwise.
>>> + */
>>> +bool nfsd4_run_cb(struct nfsd4_callback *cb)
>>> {
>>> +	bool queued;
>>> 	struct nfs4_client *clp = cb->cb_clp;
>> 
>> Reverse christmas tree, please.
>> 
>> 
>>> 
>>> 	nfsd41_cb_inflight_begin(clp);
>>> -	if (!nfsd4_queue_cb(cb))
>>> +	queued = nfsd4_queue_cb(cb);
>>> +	if (!queued)
>>> 		nfsd41_cb_inflight_end(clp);
>>> +	return queued;
>>> }
>>> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
>>> index 211f1af1cfb3..90533f43fea9 100644
>>> --- a/fs/nfsd/nfs4state.c
>>> +++ b/fs/nfsd/nfs4state.c
>>> @@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
>>> 	 * we know it's safe to take a reference.
>>> 	 */
>>> 	refcount_inc(&dp->dl_stid.sc_count);
>>> -	nfsd4_run_cb(&dp->dl_recall);
>>> +	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
>>> }
>>> 
>>> /* Called from break_lease() with flc_lock held. */
>>> static bool
>>> nfsd_break_deleg_cb(struct file_lock *fl)
>>> {
>>> -	bool ret = false;
>>> 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
>>> 	struct nfs4_file *fp = dp->dl_stid.sc_file;
>>> 	struct nfs4_client *clp = dp->dl_stid.sc_client;
>>> @@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
>>> 	fp->fi_had_conflict = true;
>>> 	nfsd_break_one_deleg(dp);
>>> 	spin_unlock(&fp->fi_lock);
>>> -	return ret;
>>> +	return false;
>>> }
>>> 
>>> /**
>>> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
>>> index b3477087a9fc..e2daef3cc003 100644
>>> --- a/fs/nfsd/state.h
>>> +++ b/fs/nfsd/state.h
>>> @@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
>>> extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
>>> extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
>>> 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
>>> -extern void nfsd4_run_cb(struct nfsd4_callback *cb);
>>> +extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
>>> extern int nfsd4_create_callback_queue(void);
>>> extern void nfsd4_destroy_callback_queue(void);
>>> extern void nfsd4_shutdown_callback(struct nfs4_client *);
>>> -- 
>>> 2.37.3
>>> 
>> 
>> --
>> Chuck Lever
>> 
>> 
>> 
> 
> -- 
> Jeff Layton <jlayton@kernel.org>

--
Chuck Lever
diff mbox series

Patch

diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 4ce328209f61..ba904614ebf5 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1371,11 +1371,21 @@  void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
 	cb->cb_holds_slot = false;
 }
 
-void nfsd4_run_cb(struct nfsd4_callback *cb)
+/**
+ * nfsd4_run_cb - queue up a callback job to run
+ * @cb: callback to queue
+ *
+ * Kick off a callback to do its thing. Returns false if it was already
+ * queued or running, true otherwise.
+ */
+bool nfsd4_run_cb(struct nfsd4_callback *cb)
 {
+	bool queued;
 	struct nfs4_client *clp = cb->cb_clp;
 
 	nfsd41_cb_inflight_begin(clp);
-	if (!nfsd4_queue_cb(cb))
+	queued = nfsd4_queue_cb(cb);
+	if (!queued)
 		nfsd41_cb_inflight_end(clp);
+	return queued;
 }
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 211f1af1cfb3..90533f43fea9 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4861,14 +4861,13 @@  static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
 	 * we know it's safe to take a reference.
 	 */
 	refcount_inc(&dp->dl_stid.sc_count);
-	nfsd4_run_cb(&dp->dl_recall);
+	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
 }
 
 /* Called from break_lease() with flc_lock held. */
 static bool
 nfsd_break_deleg_cb(struct file_lock *fl)
 {
-	bool ret = false;
 	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
 	struct nfs4_client *clp = dp->dl_stid.sc_client;
@@ -4894,7 +4893,7 @@  nfsd_break_deleg_cb(struct file_lock *fl)
 	fp->fi_had_conflict = true;
 	nfsd_break_one_deleg(dp);
 	spin_unlock(&fp->fi_lock);
-	return ret;
+	return false;
 }
 
 /**
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index b3477087a9fc..e2daef3cc003 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -692,7 +692,7 @@  extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
 extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
 extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
 		const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
-extern void nfsd4_run_cb(struct nfsd4_callback *cb);
+extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
 extern int nfsd4_create_callback_queue(void);
 extern void nfsd4_destroy_callback_queue(void);
 extern void nfsd4_shutdown_callback(struct nfs4_client *);