diff mbox series

[v4,2/2] NFSD: add rpc_status entry in nfsd debug filesystem

Message ID a23a0482a465299ac06d07d191e0c9377a11a4d1.1690569488.git.lorenzo@kernel.org (mailing list archive)
State New, archived
Headers show
Series add rpc_status handler in nfsd debug filesystem | expand

Commit Message

Lorenzo Bianconi July 28, 2023, 6:44 p.m. UTC
Introduce the rpc_status entry in the nfsd debug filesystem in order to
dump debugging information about pending RPC requests.

Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=366
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 fs/nfsd/nfs4proc.c         |   4 +-
 fs/nfsd/nfsctl.c           |  10 +++
 fs/nfsd/nfsd.h             |   2 +
 fs/nfsd/nfssvc.c           | 122 +++++++++++++++++++++++++++++++++++++
 include/linux/sunrpc/svc.h |   1 +
 net/sunrpc/svc.c           |   2 +-
 6 files changed, 137 insertions(+), 4 deletions(-)

Comments

Chuck Lever July 28, 2023, 7:22 p.m. UTC | #1
On Fri, Jul 28, 2023 at 08:44:04PM +0200, Lorenzo Bianconi wrote:
> Introduce rpc_status entry in nfsd debug filesystem in order to dump
> pending RPC requests debugging information.
> 
> Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=366
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
>  fs/nfsd/nfs4proc.c         |   4 +-
>  fs/nfsd/nfsctl.c           |  10 +++
>  fs/nfsd/nfsd.h             |   2 +
>  fs/nfsd/nfssvc.c           | 122 +++++++++++++++++++++++++++++++++++++
>  include/linux/sunrpc/svc.h |   1 +
>  net/sunrpc/svc.c           |   2 +-
>  6 files changed, 137 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
> index f0f318e78630..b7ad3081bc36 100644
> --- a/fs/nfsd/nfs4proc.c
> +++ b/fs/nfsd/nfs4proc.c
> @@ -2497,8 +2497,6 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
>  
>  static const struct nfsd4_operation nfsd4_ops[];
>  
> -static const char *nfsd4_op_name(unsigned opnum);
> -
>  /*
>   * Enforce NFSv4.1 COMPOUND ordering rules:
>   *
> @@ -3628,7 +3626,7 @@ void warn_on_nonidempotent_op(struct nfsd4_op *op)
>  	}
>  }
>  
> -static const char *nfsd4_op_name(unsigned opnum)
> +const char *nfsd4_op_name(unsigned opnum)
>  {
>  	if (opnum < ARRAY_SIZE(nfsd4_ops))
>  		return nfsd4_ops[opnum].op_name;
> diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> index 35d2e2cde1eb..f2e4f4b1e4d1 100644
> --- a/fs/nfsd/nfsctl.c
> +++ b/fs/nfsd/nfsctl.c
> @@ -57,6 +57,8 @@ enum {
>  	NFSD_RecoveryDir,
>  	NFSD_V4EndGrace,
>  #endif
> +	NFSD_Rpc_Status,
> +
>  	NFSD_MaxReserved
>  };
>  
> @@ -195,6 +197,13 @@ static inline struct net *netns(struct file *file)
>  	return file_inode(file)->i_sb->s_fs_info;
>  }
>  
> +static const struct file_operations nfsd_rpc_status_operations = {
> +	.open		= nfsd_rpc_status_open,
> +	.read		= seq_read,
> +	.llseek		= seq_lseek,
> +	.release	= nfsd_pool_stats_release,
> +};
> +
>  /*
>   * write_unlock_ip - Release all locks used by a client
>   *
> @@ -1400,6 +1409,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
>  		[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
>  		[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
>  #endif
> +		[NFSD_Rpc_Status] = {"rpc_status", &nfsd_rpc_status_operations, S_IRUGO},
>  		/* last one */ {""}
>  	};
>  
> diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
> index d88498f8b275..75a3e1d55bc8 100644
> --- a/fs/nfsd/nfsd.h
> +++ b/fs/nfsd/nfsd.h
> @@ -94,6 +94,7 @@ int		nfsd_get_nrthreads(int n, int *, struct net *);
>  int		nfsd_set_nrthreads(int n, int *, struct net *);
>  int		nfsd_pool_stats_open(struct inode *, struct file *);
>  int		nfsd_pool_stats_release(struct inode *, struct file *);
> +int		nfsd_rpc_status_open(struct inode *inode, struct file *file);
>  void		nfsd_shutdown_threads(struct net *net);
>  
>  void		nfsd_put(struct net *net);
> @@ -506,6 +507,7 @@ extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
>  
>  extern void nfsd4_init_leases_net(struct nfsd_net *nn);
>  
> +const char *nfsd4_op_name(unsigned opnum);
>  #else /* CONFIG_NFSD_V4 */
>  static inline int nfsd4_is_junction(struct dentry *dentry)
>  {
> diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> index 97830e28c140..e9e954b5ae47 100644
> --- a/fs/nfsd/nfssvc.c
> +++ b/fs/nfsd/nfssvc.c
> @@ -1057,6 +1057,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
>  	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
>  		goto out_decode_err;
>  
> +	atomic_inc(&rqstp->rq_status_counter);
> +

Does this really have to be an atomic_t ? Seems like nfsd_dispatch
is the only function updating it. You might need release semantics
here and acquire semantics in nfsd_rpc_status_show(). I'd rather
avoid a full-on atomic op in nfsd_dispatch() unless it's absolutely
needed.

Also, do you need to bump the rq_status_counter in the other RPC
dispatch routines (lockd and nfs callback) too?


>  	rp = NULL;
>  	switch (nfsd_cache_lookup(rqstp, &rp)) {
>  	case RC_DOIT:
> @@ -1074,6 +1076,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
>  	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
>  		goto out_encode_err;
>  
> +	atomic_inc(&rqstp->rq_status_counter);
> +
>  	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
>  out_cached_reply:
>  	return 1;
> @@ -1149,3 +1153,121 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
>  	mutex_unlock(&nfsd_mutex);
>  	return ret;
>  }
> +
> +static int nfsd_rpc_status_show(struct seq_file *m, void *v)
> +{
> +	struct inode *inode = file_inode(m->file);
> +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> +	int i;
> +
> +	rcu_read_lock();
> +
> +	for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
> +		struct svc_rqst *rqstp;
> +
> +		list_for_each_entry_rcu(rqstp,
> +				&nn->nfsd_serv->sv_pools[i].sp_all_threads,
> +				rq_all) {
> +			struct nfsd_rpc_status_info {
> +				struct sockaddr daddr;
> +				struct sockaddr saddr;
> +				unsigned long rq_flags;
> +				__be32 rq_xid;
> +				u32 rq_prog;
> +				u32 rq_vers;
> +				const char *pc_name;
> +				ktime_t rq_stime;
> +				u32 opnum[NFSD_MAX_OPS_PER_COMPOUND]; /* NFSv4 compund */
> +			} rqstp_info;
> +			unsigned int status_counter;
> +			char buf[RPC_MAX_ADDRBUFLEN];
> +			int j, opcnt = 0;
> +
> +			if (!test_bit(RQ_BUSY, &rqstp->rq_flags))
> +				continue;
> +
> +			status_counter = atomic_read(&rqstp->rq_status_counter);

Neil said:

> > I suggest you add a counter to the rqstp which is incremented from
> > even to odd after parsing a request - including the v4 parsing needed to
> > have a stable ->opcnt - and then incremented from odd to even when the
> > request is complete.
> > Then this code samples the counter, skips the rqst if the counter is
> > even, and resamples the counter after collecting the data.  If it has
> > changed, then drop the record.

I don't see a check if the status counter is even.

Also, as above, I'm not sure atomic_read() is necessary here. Maybe
just READ_ONCE() ? Neil, any thoughts?


> +
> +			rqstp_info.rq_xid = rqstp->rq_xid;
> +			rqstp_info.rq_flags = rqstp->rq_flags;
> +			rqstp_info.rq_prog = rqstp->rq_prog;
> +			rqstp_info.rq_vers = rqstp->rq_vers;
> +			rqstp_info.pc_name = svc_proc_name(rqstp);
> +			rqstp_info.rq_stime = rqstp->rq_stime;
> +			memcpy(&rqstp_info.daddr, svc_daddr(rqstp),
> +			       sizeof(struct sockaddr));
> +			memcpy(&rqstp_info.saddr, svc_addr(rqstp),
> +			       sizeof(struct sockaddr));
> +
> +#ifdef CONFIG_NFSD_V4
> +			if (rqstp->rq_vers == NFS4_VERSION &&
> +			    rqstp->rq_proc == NFSPROC4_COMPOUND) {
> +				/* NFSv4 compund */
> +				struct nfsd4_compoundargs *args = rqstp->rq_argp;
> +
> +				opcnt = args->opcnt;
> +				for (j = 0; j < opcnt; j++) {
> +					struct nfsd4_op *op = &args->ops[j];
> +
> +					rqstp_info.opnum[j] = op->opnum;
> +				}
> +			}
> +#endif /* CONFIG_NFSD_V4 */
> +
> +			/* In order to detect if the RPC request is pending and
> +			 * RPC info are stable we check if rq_status_counter
> +			 * has been incremented during the handler processing.
> +			 */
> +			if (status_counter != atomic_read(&rqstp->rq_status_counter))
> +				continue;
> +
> +			seq_printf(m,
> +				   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",
> +				   be32_to_cpu(rqstp_info.rq_xid),
> +				   rqstp_info.rq_flags,
> +				   rqstp_info.rq_prog,
> +				   rqstp_info.rq_vers,
> +				   rqstp_info.pc_name,
> +				   ktime_to_us(rqstp_info.rq_stime));
> +
> +			seq_printf(m, " %s,",
> +				   __svc_print_addr(&rqstp_info.saddr, buf,
> +						    sizeof(buf), false));
> +			seq_printf(m, " %s,",
> +				   __svc_print_addr(&rqstp_info.daddr, buf,
> +						    sizeof(buf), false));
> +			for (j = 0; j < opcnt; j++)
> +				seq_printf(m, " %s%s",
> +					   nfsd4_op_name(rqstp_info.opnum[j]),
> +					   j == opcnt - 1 ? "," : "");
> +			seq_puts(m, "\n");
> +		}
> +	}
> +
> +	rcu_read_unlock();
> +
> +	return 0;
> +}
> +
> +/**
> + * nfsd_rpc_status_open - Atomically copy a write verifier

The kdoc comment maybe was copied, pasted, and then not updated?

> + * @inode: entry inode pointer.
> + * @file: entry file pointer.
> + *
> + * This routine dumps pending RPC requests info queued into nfs server.
> + */
> +int nfsd_rpc_status_open(struct inode *inode, struct file *file)
> +{
> +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> +
> +	mutex_lock(&nfsd_mutex);
> +	if (!nn->nfsd_serv) {
> +		mutex_unlock(&nfsd_mutex);
> +		return -ENODEV;
> +	}
> +
> +	svc_get(nn->nfsd_serv);
> +	mutex_unlock(&nfsd_mutex);
> +
> +	return single_open(file, nfsd_rpc_status_show, inode->i_private);
> +}
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index fe1394cc1371..cb516da9e270 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -270,6 +270,7 @@ struct svc_rqst {
>  						 * net namespace
>  						 */
>  	void **			rq_lease_breaker; /* The v4 client breaking a lease */
> +	atomic_t		rq_status_counter; /* RPC processing counter */
>  };
>  
>  #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index 587811a002c9..44eac83b35a1 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -1629,7 +1629,7 @@ const char *svc_proc_name(const struct svc_rqst *rqstp)
>  		return rqstp->rq_procinfo->pc_name;
>  	return "unknown";
>  }
> -
> +EXPORT_SYMBOL_GPL(svc_proc_name);
>  
>  /**
>   * svc_encode_result_payload - mark a range of bytes as a result payload
> -- 
> 2.41.0
>
NeilBrown July 28, 2023, 10:11 p.m. UTC | #2
On Sat, 29 Jul 2023, Chuck Lever wrote:
> On Fri, Jul 28, 2023 at 08:44:04PM +0200, Lorenzo Bianconi wrote:
> > Introduce rpc_status entry in nfsd debug filesystem in order to dump
> > pending RPC requests debugging information.
> > 
> > Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=366
> > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > ---
> >  fs/nfsd/nfs4proc.c         |   4 +-
> >  fs/nfsd/nfsctl.c           |  10 +++
> >  fs/nfsd/nfsd.h             |   2 +
> >  fs/nfsd/nfssvc.c           | 122 +++++++++++++++++++++++++++++++++++++
> >  include/linux/sunrpc/svc.h |   1 +
> >  net/sunrpc/svc.c           |   2 +-
> >  6 files changed, 137 insertions(+), 4 deletions(-)
> > 
> > diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
> > index f0f318e78630..b7ad3081bc36 100644
> > --- a/fs/nfsd/nfs4proc.c
> > +++ b/fs/nfsd/nfs4proc.c
> > @@ -2497,8 +2497,6 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
> >  
> >  static const struct nfsd4_operation nfsd4_ops[];
> >  
> > -static const char *nfsd4_op_name(unsigned opnum);
> > -
> >  /*
> >   * Enforce NFSv4.1 COMPOUND ordering rules:
> >   *
> > @@ -3628,7 +3626,7 @@ void warn_on_nonidempotent_op(struct nfsd4_op *op)
> >  	}
> >  }
> >  
> > -static const char *nfsd4_op_name(unsigned opnum)
> > +const char *nfsd4_op_name(unsigned opnum)
> >  {
> >  	if (opnum < ARRAY_SIZE(nfsd4_ops))
> >  		return nfsd4_ops[opnum].op_name;
> > diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> > index 35d2e2cde1eb..f2e4f4b1e4d1 100644
> > --- a/fs/nfsd/nfsctl.c
> > +++ b/fs/nfsd/nfsctl.c
> > @@ -57,6 +57,8 @@ enum {
> >  	NFSD_RecoveryDir,
> >  	NFSD_V4EndGrace,
> >  #endif
> > +	NFSD_Rpc_Status,
> > +
> >  	NFSD_MaxReserved
> >  };
> >  
> > @@ -195,6 +197,13 @@ static inline struct net *netns(struct file *file)
> >  	return file_inode(file)->i_sb->s_fs_info;
> >  }
> >  
> > +static const struct file_operations nfsd_rpc_status_operations = {
> > +	.open		= nfsd_rpc_status_open,
> > +	.read		= seq_read,
> > +	.llseek		= seq_lseek,
> > +	.release	= nfsd_pool_stats_release,
> > +};
> > +
> >  /*
> >   * write_unlock_ip - Release all locks used by a client
> >   *
> > @@ -1400,6 +1409,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
> >  		[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
> >  		[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
> >  #endif
> > +		[NFSD_Rpc_Status] = {"rpc_status", &nfsd_rpc_status_operations, S_IRUGO},
> >  		/* last one */ {""}
> >  	};
> >  
> > diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
> > index d88498f8b275..75a3e1d55bc8 100644
> > --- a/fs/nfsd/nfsd.h
> > +++ b/fs/nfsd/nfsd.h
> > @@ -94,6 +94,7 @@ int		nfsd_get_nrthreads(int n, int *, struct net *);
> >  int		nfsd_set_nrthreads(int n, int *, struct net *);
> >  int		nfsd_pool_stats_open(struct inode *, struct file *);
> >  int		nfsd_pool_stats_release(struct inode *, struct file *);
> > +int		nfsd_rpc_status_open(struct inode *inode, struct file *file);
> >  void		nfsd_shutdown_threads(struct net *net);
> >  
> >  void		nfsd_put(struct net *net);
> > @@ -506,6 +507,7 @@ extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
> >  
> >  extern void nfsd4_init_leases_net(struct nfsd_net *nn);
> >  
> > +const char *nfsd4_op_name(unsigned opnum);
> >  #else /* CONFIG_NFSD_V4 */
> >  static inline int nfsd4_is_junction(struct dentry *dentry)
> >  {
> > diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> > index 97830e28c140..e9e954b5ae47 100644
> > --- a/fs/nfsd/nfssvc.c
> > +++ b/fs/nfsd/nfssvc.c
> > @@ -1057,6 +1057,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
> >  	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
> >  		goto out_decode_err;
> >  
> > +	atomic_inc(&rqstp->rq_status_counter);
> > +
> 
> Does this really have to be an atomic_t ? Seems like nfsd_dispatch
> is the only function updating it. You might need release semantics
> here and acquire semantics in nfsd_rpc_status_show(). I'd rather
> avoid a full-on atomic op in nfsd_dispatch() unless it's absolutely
> needed.
> 
> Also, do you need to bump the rq_status_counter in the other RPC
> dispatch routines (lockd and nfs callback) too?
> 
> 
> >  	rp = NULL;
> >  	switch (nfsd_cache_lookup(rqstp, &rp)) {
> >  	case RC_DOIT:
> > @@ -1074,6 +1076,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
> >  	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
> >  		goto out_encode_err;
> >  
> > +	atomic_inc(&rqstp->rq_status_counter);
> > +
> >  	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
> >  out_cached_reply:
> >  	return 1;
> > @@ -1149,3 +1153,121 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
> >  	mutex_unlock(&nfsd_mutex);
> >  	return ret;
> >  }
> > +
> > +static int nfsd_rpc_status_show(struct seq_file *m, void *v)
> > +{
> > +	struct inode *inode = file_inode(m->file);
> > +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > +	int i;
> > +
> > +	rcu_read_lock();
> > +
> > +	for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
> > +		struct svc_rqst *rqstp;
> > +
> > +		list_for_each_entry_rcu(rqstp,
> > +				&nn->nfsd_serv->sv_pools[i].sp_all_threads,
> > +				rq_all) {
> > +			struct nfsd_rpc_status_info {
> > +				struct sockaddr daddr;
> > +				struct sockaddr saddr;
> > +				unsigned long rq_flags;
> > +				__be32 rq_xid;
> > +				u32 rq_prog;
> > +				u32 rq_vers;
> > +				const char *pc_name;
> > +				ktime_t rq_stime;
> > +				u32 opnum[NFSD_MAX_OPS_PER_COMPOUND]; /* NFSv4 compound */
> > +			} rqstp_info;
> > +			unsigned int status_counter;
> > +			char buf[RPC_MAX_ADDRBUFLEN];
> > +			int j, opcnt = 0;
> > +
> > +			if (!test_bit(RQ_BUSY, &rqstp->rq_flags))
> > +				continue;
> > +
> > +			status_counter = atomic_read(&rqstp->rq_status_counter);
> 
> Neil said:
> 
> > I suggest you add a counter to the rqstp which is incremented from
> > even to odd after parsing a request - including the v4 parsing needed to
> > have a stable ->opcnt - and then incremented from odd to even when the
> > request is complete.
> > Then this code samples the counter, skips the rqst if the counter is
> > even, and resamples the counter after collecting the data.  If it has
> > changed, then drop the record.
> 
> I don't see a check if the status counter is even.

...and there does need to be one.  If the counter is even, then the
fields are meaningless and unstable.  The RQ_BUSY check is, I think,
meant to check if the fields are meaningful, but they aren't meaningful
until some time after RQ_BUSY is clear.

I would replace the "RQ_BUSY not set" test with "counter is even"

> 
> Also, as above, I'm not sure atomic_read() is necessary here. Maybe
> just READ_ONCE() ? Neil, any thoughts?

Agree - we don't need an atomic as there is a single writer.
I think
  smp_store_release(&rqstp->counter, rqstp->counter | 1)
to increment it after parsing the request.  This makes it abundantly
clear the value will be odd, and ensures that if another thread sees the
'odd' value, then it can also see the results of the parse.

To increment after processing the request,
   smp_store_release(&rqstp->counter, rqstp->counter + 1)

Then
  counter = smp_load_acquire(&rqstp->counter);
  if ((counter & 1) == 0)
to test if it is even before reading the state.  This ensures that if it
sees 'odd' it will see the results of the parse.

and
  if (smp_load_acquire(&rqstp->counter) != counter) continue;

before trusting that the data we read was consistent.

Note that we "release" *after* something and "acquire" *before*
something.
I think it helps to always think about what the access is "before" or
"after" when reasoning about barriers.
checkpatch will want a comment before these acquire and release
operations.  I recommend using the corresponding word "before" or "after"
in that comment.

NeilBrown
NeilBrown July 28, 2023, 10:23 p.m. UTC | #3
On Sat, 29 Jul 2023, Lorenzo Bianconi wrote:
> Introduce rpc_status entry in nfsd debug filesystem in order to dump
> pending RPC requests debugging information.
> 
> Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=366
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
>  fs/nfsd/nfs4proc.c         |   4 +-
>  fs/nfsd/nfsctl.c           |  10 +++
>  fs/nfsd/nfsd.h             |   2 +
>  fs/nfsd/nfssvc.c           | 122 +++++++++++++++++++++++++++++++++++++
>  include/linux/sunrpc/svc.h |   1 +
>  net/sunrpc/svc.c           |   2 +-
>  6 files changed, 137 insertions(+), 4 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
> index f0f318e78630..b7ad3081bc36 100644
> --- a/fs/nfsd/nfs4proc.c
> +++ b/fs/nfsd/nfs4proc.c
> @@ -2497,8 +2497,6 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
>  
>  static const struct nfsd4_operation nfsd4_ops[];
>  
> -static const char *nfsd4_op_name(unsigned opnum);
> -
>  /*
>   * Enforce NFSv4.1 COMPOUND ordering rules:
>   *
> @@ -3628,7 +3626,7 @@ void warn_on_nonidempotent_op(struct nfsd4_op *op)
>  	}
>  }
>  
> -static const char *nfsd4_op_name(unsigned opnum)
> +const char *nfsd4_op_name(unsigned opnum)
>  {
>  	if (opnum < ARRAY_SIZE(nfsd4_ops))
>  		return nfsd4_ops[opnum].op_name;
> diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> index 35d2e2cde1eb..f2e4f4b1e4d1 100644
> --- a/fs/nfsd/nfsctl.c
> +++ b/fs/nfsd/nfsctl.c
> @@ -57,6 +57,8 @@ enum {
>  	NFSD_RecoveryDir,
>  	NFSD_V4EndGrace,
>  #endif
> +	NFSD_Rpc_Status,

I think NFSD_Rpc_Status needs to come before the CONFIG_NFSD_V4 block.
Otherwise the comment above (which I apparently approved) makes even
less sense than it does now.
(Maybe just remove the comment??)

> +
>  	NFSD_MaxReserved
>  };
>  
> @@ -195,6 +197,13 @@ static inline struct net *netns(struct file *file)
>  	return file_inode(file)->i_sb->s_fs_info;
>  }
>  
> +static const struct file_operations nfsd_rpc_status_operations = {
> +	.open		= nfsd_rpc_status_open,
> +	.read		= seq_read,
> +	.llseek		= seq_lseek,
> +	.release	= nfsd_pool_stats_release,
> +};
> +
>  /*
>   * write_unlock_ip - Release all locks used by a client
>   *
> @@ -1400,6 +1409,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
>  		[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
>  		[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
>  #endif
> +		[NFSD_Rpc_Status] = {"rpc_status", &nfsd_rpc_status_operations, S_IRUGO},
>  		/* last one */ {""}
>  	};
>  
> diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
> index d88498f8b275..75a3e1d55bc8 100644
> --- a/fs/nfsd/nfsd.h
> +++ b/fs/nfsd/nfsd.h
> @@ -94,6 +94,7 @@ int		nfsd_get_nrthreads(int n, int *, struct net *);
>  int		nfsd_set_nrthreads(int n, int *, struct net *);
>  int		nfsd_pool_stats_open(struct inode *, struct file *);
>  int		nfsd_pool_stats_release(struct inode *, struct file *);
> +int		nfsd_rpc_status_open(struct inode *inode, struct file *file);
>  void		nfsd_shutdown_threads(struct net *net);
>  
>  void		nfsd_put(struct net *net);
> @@ -506,6 +507,7 @@ extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
>  
>  extern void nfsd4_init_leases_net(struct nfsd_net *nn);
>  
> +const char *nfsd4_op_name(unsigned opnum);
>  #else /* CONFIG_NFSD_V4 */
>  static inline int nfsd4_is_junction(struct dentry *dentry)
>  {
> diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> index 97830e28c140..e9e954b5ae47 100644
> --- a/fs/nfsd/nfssvc.c
> +++ b/fs/nfsd/nfssvc.c
> @@ -1057,6 +1057,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
>  	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
>  		goto out_decode_err;
>  
> +	atomic_inc(&rqstp->rq_status_counter);
> +
>  	rp = NULL;
>  	switch (nfsd_cache_lookup(rqstp, &rp)) {
>  	case RC_DOIT:
> @@ -1074,6 +1076,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
>  	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
>  		goto out_encode_err;
>  
> +	atomic_inc(&rqstp->rq_status_counter);
> +
>  	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
>  out_cached_reply:
>  	return 1;
> @@ -1149,3 +1153,121 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
>  	mutex_unlock(&nfsd_mutex);
>  	return ret;
>  }
> +
> +static int nfsd_rpc_status_show(struct seq_file *m, void *v)
> +{
> +	struct inode *inode = file_inode(m->file);
> +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> +	int i;
> +
> +	rcu_read_lock();
> +
> +	for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
> +		struct svc_rqst *rqstp;
> +
> +		list_for_each_entry_rcu(rqstp,
> +				&nn->nfsd_serv->sv_pools[i].sp_all_threads,
> +				rq_all) {
> +			struct nfsd_rpc_status_info {
> +				struct sockaddr daddr;
> +				struct sockaddr saddr;
> +				unsigned long rq_flags;
> +				__be32 rq_xid;
> +				u32 rq_prog;
> +				u32 rq_vers;
> +				const char *pc_name;
> +				ktime_t rq_stime;
> +				u32 opnum[NFSD_MAX_OPS_PER_COMPOUND]; /* NFSv4 compund */
> +			} rqstp_info;
> +			unsigned int status_counter;
> +			char buf[RPC_MAX_ADDRBUFLEN];
> +			int j, opcnt = 0;
> +
> +			if (!test_bit(RQ_BUSY, &rqstp->rq_flags))
> +				continue;
> +
> +			status_counter = atomic_read(&rqstp->rq_status_counter);
> +
> +			rqstp_info.rq_xid = rqstp->rq_xid;
> +			rqstp_info.rq_flags = rqstp->rq_flags;
> +			rqstp_info.rq_prog = rqstp->rq_prog;
> +			rqstp_info.rq_vers = rqstp->rq_vers;
> +			rqstp_info.pc_name = svc_proc_name(rqstp);
> +			rqstp_info.rq_stime = rqstp->rq_stime;
> +			memcpy(&rqstp_info.daddr, svc_daddr(rqstp),
> +			       sizeof(struct sockaddr));
> +			memcpy(&rqstp_info.saddr, svc_addr(rqstp),
> +			       sizeof(struct sockaddr));
> +
> +#ifdef CONFIG_NFSD_V4
> +			if (rqstp->rq_vers == NFS4_VERSION &&
> +			    rqstp->rq_proc == NFSPROC4_COMPOUND) {
> +				/* NFSv4 compund */
> +				struct nfsd4_compoundargs *args = rqstp->rq_argp;
> +
> +				opcnt = args->opcnt;
> +				for (j = 0; j < opcnt; j++) {
> +					struct nfsd4_op *op = &args->ops[j];
> +
> +					rqstp_info.opnum[j] = op->opnum;
> +				}
> +			}
> +#endif /* CONFIG_NFSD_V4 */
> +
> +			/* In order to detect if the RPC request is pending and
> +			 * RPC info are stable we check if rq_status_counter
> +			 * has been incremented during the handler processing.
> +			 */
> +			if (status_counter != atomic_read(&rqstp->rq_status_counter))
> +				continue;
> +
> +			seq_printf(m,
> +				   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",

Please drop the commas.
It might be defensible to have commas and no spaces by comparing with
/proc/fs/nfsd/supported_krb5_enctypes, but the dominant pattern is to
use only spaces to separate fields on /proc files.

Thanks,
NeilBrown
kernel test robot July 29, 2023, 5:18 a.m. UTC | #4
Hi Lorenzo,

kernel test robot noticed the following build errors:

[auto build test ERROR on next-20230728]
[cannot apply to trondmy-nfs/linux-next linus/master v6.5-rc3 v6.5-rc2 v6.5-rc1 v6.5-rc3]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Lorenzo-Bianconi/SUNRPC-add-verbose-parameter-to-__svc_print_addr/20230729-024652
base:   next-20230728
patch link:    https://lore.kernel.org/r/a23a0482a465299ac06d07d191e0c9377a11a4d1.1690569488.git.lorenzo%40kernel.org
patch subject: [PATCH v4 2/2] NFSD: add rpc_status entry in nfsd debug filesystem
config: alpha-defconfig (https://download.01.org/0day-ci/archive/20230729/202307291354.ujwCaQTt-lkp@intel.com/config)
compiler: alpha-linux-gcc (GCC) 12.3.0
reproduce: (https://download.01.org/0day-ci/archive/20230729/202307291354.ujwCaQTt-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202307291354.ujwCaQTt-lkp@intel.com/

All error/warnings (new ones prefixed by >>):

   fs/nfsd/nfssvc.c: In function 'nfsd_rpc_status_show':
>> fs/nfsd/nfssvc.c:1241:44: error: implicit declaration of function 'nfsd4_op_name'; did you mean 'nfsd_rename'? [-Werror=implicit-function-declaration]
    1241 |                                            nfsd4_op_name(rqstp_info.opnum[j]),
         |                                            ^~~~~~~~~~~~~
         |                                            nfsd_rename
>> fs/nfsd/nfssvc.c:1240:50: warning: format '%s' expects argument of type 'char *', but argument 3 has type 'int' [-Wformat=]
    1240 |                                 seq_printf(m, " %s%s",
         |                                                 ~^
         |                                                  |
         |                                                  char *
         |                                                 %d
    1241 |                                            nfsd4_op_name(rqstp_info.opnum[j]),
         |                                            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
         |                                            |
         |                                            int
   cc1: some warnings being treated as errors


vim +1241 fs/nfsd/nfssvc.c

  1216	
  1217				/* In order to detect if the RPC request is pending and
  1218				 * RPC info are stable we check if rq_status_counter
  1219				 * has been incremented during the handler processing.
  1220				 */
  1221				if (status_counter != atomic_read(&rqstp->rq_status_counter))
  1222					continue;
  1223	
  1224				seq_printf(m,
  1225					   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",
  1226					   be32_to_cpu(rqstp_info.rq_xid),
  1227					   rqstp_info.rq_flags,
  1228					   rqstp_info.rq_prog,
  1229					   rqstp_info.rq_vers,
  1230					   rqstp_info.pc_name,
  1231					   ktime_to_us(rqstp_info.rq_stime));
  1232	
  1233				seq_printf(m, " %s,",
  1234					   __svc_print_addr(&rqstp_info.saddr, buf,
  1235							    sizeof(buf), false));
  1236				seq_printf(m, " %s,",
  1237					   __svc_print_addr(&rqstp_info.daddr, buf,
  1238							    sizeof(buf), false));
  1239				for (j = 0; j < opcnt; j++)
> 1240					seq_printf(m, " %s%s",
> 1241						   nfsd4_op_name(rqstp_info.opnum[j]),
  1242						   j == opcnt - 1 ? "," : "");
  1243				seq_puts(m, "\n");
  1244			}
  1245		}
  1246	
  1247		rcu_read_unlock();
  1248	
  1249		return 0;
  1250	}
  1251
Lorenzo Bianconi Aug. 2, 2023, 8:58 a.m. UTC | #5
> On Sat, 29 Jul 2023, Lorenzo Bianconi wrote:
> > Introduce rpc_status entry in nfsd debug filesystem in order to dump
> > pending RPC requests debugging information.
> > 

Hi Neil,

thx for the review.

> > Link: https://bugzilla.linux-nfs.org/show_bug.cgi?id=366
> > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > ---
> >  fs/nfsd/nfs4proc.c         |   4 +-
> >  fs/nfsd/nfsctl.c           |  10 +++
> >  fs/nfsd/nfsd.h             |   2 +
> >  fs/nfsd/nfssvc.c           | 122 +++++++++++++++++++++++++++++++++++++
> >  include/linux/sunrpc/svc.h |   1 +
> >  net/sunrpc/svc.c           |   2 +-
> >  6 files changed, 137 insertions(+), 4 deletions(-)
> > 
> > diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
> > index f0f318e78630..b7ad3081bc36 100644
> > --- a/fs/nfsd/nfs4proc.c
> > +++ b/fs/nfsd/nfs4proc.c
> > @@ -2497,8 +2497,6 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
> >  
> >  static const struct nfsd4_operation nfsd4_ops[];
> >  
> > -static const char *nfsd4_op_name(unsigned opnum);
> > -
> >  /*
> >   * Enforce NFSv4.1 COMPOUND ordering rules:
> >   *
> > @@ -3628,7 +3626,7 @@ void warn_on_nonidempotent_op(struct nfsd4_op *op)
> >  	}
> >  }
> >  
> > -static const char *nfsd4_op_name(unsigned opnum)
> > +const char *nfsd4_op_name(unsigned opnum)
> >  {
> >  	if (opnum < ARRAY_SIZE(nfsd4_ops))
> >  		return nfsd4_ops[opnum].op_name;
> > diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> > index 35d2e2cde1eb..f2e4f4b1e4d1 100644
> > --- a/fs/nfsd/nfsctl.c
> > +++ b/fs/nfsd/nfsctl.c
> > @@ -57,6 +57,8 @@ enum {
> >  	NFSD_RecoveryDir,
> >  	NFSD_V4EndGrace,
> >  #endif
> > +	NFSD_Rpc_Status,
> 
> I think NFSD_Rpc_Status needs to come before the CONFIG_NFSD_V4 block.
> Otherwise the comment above (which I apparently approved) makes even
> less sense than it does now.
> (Maybe just remove the comment??)

ack, right. I will fix it.

> 
> > +
> >  	NFSD_MaxReserved
> >  };
> >  
> > @@ -195,6 +197,13 @@ static inline struct net *netns(struct file *file)
> >  	return file_inode(file)->i_sb->s_fs_info;
> >  }
> >  

[...]

> > +#endif /* CONFIG_NFSD_V4 */
> > +
> > +			/* In order to detect if the RPC request is pending and
> > +			 * RPC info are stable we check if rq_status_counter
> > +			 * has been incremented during the handler processing.
> > +			 */
> > +			if (status_counter != atomic_read(&rqstp->rq_status_counter))
> > +				continue;
> > +
> > +			seq_printf(m,
> > +				   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",
> 
> Please drop the commas.
> It might be defensible to have commas and no spaces by comparing with
> /proc/fs/nfsd/supported_krb5_enctypes, but the dominant pattern is to
> use only spaces to separate fields on /proc files.

ack, I will fix it.

Regards,
Lorenzo

> 
> Thanks,
> NeilBrown
>
Lorenzo Bianconi Aug. 4, 2023, 7:56 a.m. UTC | #6
[...]
> > +	atomic_inc(&rqstp->rq_status_counter);
> > +
> 
> Does this really have to be an atomic_t ? Seems like nfsd_dispatch
> is the only function updating it. You might need release semantics
> here and acquire semantics in nfsd_rpc_status_show(). I'd rather
> avoid a full-on atomic op in nfsd_dispatch() unless it's absolutely
> needed.

ack, I agree. I will work on it.

> 
> Also, do you need to bump the rq_status_counter in the other RPC
> dispatch routines (lockd and nfs callback) too?

the only consumer at the moment is nfsd, do you think we should add them in
advance for lockd as well?

> 
> 
> >  	rp = NULL;
> >  	switch (nfsd_cache_lookup(rqstp, &rp)) {
> >  	case RC_DOIT:
> > @@ -1074,6 +1076,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
> >  	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
> >  		goto out_encode_err;
> >  
> > +	atomic_inc(&rqstp->rq_status_counter);
> > +
> >  	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
> >  out_cached_reply:
> >  	return 1;
> > @@ -1149,3 +1153,121 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
> >  	mutex_unlock(&nfsd_mutex);
> >  	return ret;
> >  }
> > +
> > +static int nfsd_rpc_status_show(struct seq_file *m, void *v)
> > +{
> > +	struct inode *inode = file_inode(m->file);
> > +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > +	int i;
> > +
> > +	rcu_read_lock();
> > +
> > +	for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
> > +		struct svc_rqst *rqstp;
> > +
> > +		list_for_each_entry_rcu(rqstp,
> > +				&nn->nfsd_serv->sv_pools[i].sp_all_threads,
> > +				rq_all) {
> > +			struct nfsd_rpc_status_info {
> > +				struct sockaddr daddr;
> > +				struct sockaddr saddr;
> > +				unsigned long rq_flags;
> > +				__be32 rq_xid;
> > +				u32 rq_prog;
> > +				u32 rq_vers;
> > +				const char *pc_name;
> > +				ktime_t rq_stime;
> > +				u32 opnum[NFSD_MAX_OPS_PER_COMPOUND]; /* NFSv4 compund */
> > +			} rqstp_info;
> > +			unsigned int status_counter;
> > +			char buf[RPC_MAX_ADDRBUFLEN];
> > +			int j, opcnt = 0;
> > +
> > +			if (!test_bit(RQ_BUSY, &rqstp->rq_flags))
> > +				continue;
> > +
> > +			status_counter = atomic_read(&rqstp->rq_status_counter);
> 
> Neil said:
> 
> > I suggest you add a counter to the rqstp which is incremented from
> > even to odd after parsing a request - including the v4 parsing needed to
> > have a stable ->opcnt - and then incremented from odd to even when the
> > request is complete.
> > Then this code samples the counter, skips the rqst if the counter is
> > even, and resamples the counter after collecting the data.  If it has
> > changed, then drop the record.
> 
> I don't see a check if the status counter is even.
> 
> Also, as above, I'm not sure atomic_read() is necessary here. Maybe
> just READ_ONCE() ? Neil, any thoughts?


I used the RQ_BUSY check instead of checking if the counter is even, but I can
see the point now. I will fix it.

> 
> 
> > +
> > +			rqstp_info.rq_xid = rqstp->rq_xid;
> > +			rqstp_info.rq_flags = rqstp->rq_flags;
> > +			rqstp_info.rq_prog = rqstp->rq_prog;
> > +			rqstp_info.rq_vers = rqstp->rq_vers;
> > +			rqstp_info.pc_name = svc_proc_name(rqstp);
> > +			rqstp_info.rq_stime = rqstp->rq_stime;
> > +			memcpy(&rqstp_info.daddr, svc_daddr(rqstp),
> > +			       sizeof(struct sockaddr));
> > +			memcpy(&rqstp_info.saddr, svc_addr(rqstp),
> > +			       sizeof(struct sockaddr));
> > +
> > +#ifdef CONFIG_NFSD_V4
> > +			if (rqstp->rq_vers == NFS4_VERSION &&
> > +			    rqstp->rq_proc == NFSPROC4_COMPOUND) {
> > +				/* NFSv4 compund */
> > +				struct nfsd4_compoundargs *args = rqstp->rq_argp;
> > +
> > +				opcnt = args->opcnt;
> > +				for (j = 0; j < opcnt; j++) {
> > +					struct nfsd4_op *op = &args->ops[j];
> > +
> > +					rqstp_info.opnum[j] = op->opnum;
> > +				}
> > +			}
> > +#endif /* CONFIG_NFSD_V4 */
> > +
> > +			/* In order to detect if the RPC request is pending and
> > +			 * RPC info are stable we check if rq_status_counter
> > +			 * has been incremented during the handler processing.
> > +			 */
> > +			if (status_counter != atomic_read(&rqstp->rq_status_counter))
> > +				continue;
> > +
> > +			seq_printf(m,
> > +				   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",
> > +				   be32_to_cpu(rqstp_info.rq_xid),
> > +				   rqstp_info.rq_flags,
> > +				   rqstp_info.rq_prog,
> > +				   rqstp_info.rq_vers,
> > +				   rqstp_info.pc_name,
> > +				   ktime_to_us(rqstp_info.rq_stime));
> > +
> > +			seq_printf(m, " %s,",
> > +				   __svc_print_addr(&rqstp_info.saddr, buf,
> > +						    sizeof(buf), false));
> > +			seq_printf(m, " %s,",
> > +				   __svc_print_addr(&rqstp_info.daddr, buf,
> > +						    sizeof(buf), false));
> > +			for (j = 0; j < opcnt; j++)
> > +				seq_printf(m, " %s%s",
> > +					   nfsd4_op_name(rqstp_info.opnum[j]),
> > +					   j == opcnt - 1 ? "," : "");
> > +			seq_puts(m, "\n");
> > +		}
> > +	}
> > +
> > +	rcu_read_unlock();
> > +
> > +	return 0;
> > +}
> > +
> > +/**
> > + * nfsd_rpc_status_open - Atomically copy a write verifier
> 
> The kdoc comment maybe was copied, pasted, and then not updated?

ack, I will fix it.

Regards,
Lorenzo

> 
> > + * @inode: entry inode pointer.
> > + * @file: entry file pointer.
> > + *
> > + * This routine dumps pending RPC requests info queued into nfs server.
> > + */
> > +int nfsd_rpc_status_open(struct inode *inode, struct file *file)
> > +{
> > +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > +
> > +	mutex_lock(&nfsd_mutex);
> > +	if (!nn->nfsd_serv) {
> > +		mutex_unlock(&nfsd_mutex);
> > +		return -ENODEV;
> > +	}
> > +
> > +	svc_get(nn->nfsd_serv);
> > +	mutex_unlock(&nfsd_mutex);
> > +
> > +	return single_open(file, nfsd_rpc_status_show, inode->i_private);
> > +}
> > diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> > index fe1394cc1371..cb516da9e270 100644
> > --- a/include/linux/sunrpc/svc.h
> > +++ b/include/linux/sunrpc/svc.h
> > @@ -270,6 +270,7 @@ struct svc_rqst {
> >  						 * net namespace
> >  						 */
> >  	void **			rq_lease_breaker; /* The v4 client breaking a lease */
> > +	atomic_t		rq_status_counter; /* RPC processing counter */
> >  };
> >  
> >  #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
> > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> > index 587811a002c9..44eac83b35a1 100644
> > --- a/net/sunrpc/svc.c
> > +++ b/net/sunrpc/svc.c
> > @@ -1629,7 +1629,7 @@ const char *svc_proc_name(const struct svc_rqst *rqstp)
> >  		return rqstp->rq_procinfo->pc_name;
> >  	return "unknown";
> >  }
> > -
> > +EXPORT_SYMBOL_GPL(svc_proc_name);
> >  
> >  /**
> >   * svc_encode_result_payload - mark a range of bytes as a result payload
> > -- 
> > 2.41.0
> > 
> 
> -- 
> Chuck Lever
>
Lorenzo Bianconi Aug. 4, 2023, 8:02 a.m. UTC | #7
> On Sat, 29 Jul 2023, Chuck Lever wrote:
> > On Fri, Jul 28, 2023 at 08:44:04PM +0200, Lorenzo Bianconi wrote:

[...]

> > 
> > Neil said:
> > 
> > > I suggest you add a counter to the rqstp which is incremented from
> > > even to odd after parsing a request - including the v4 parsing needed to
> > > have a stable ->opcnt - and then incremented from odd to even when the
> > > request is complete.
> > > Then this code samples the counter, skips the rqst if the counter is
> > > even, and resamples the counter after collecting the data.  If it has
> > > changed, then drop the record.
> > 
> > I don't see a check if the status counter is even.
> 
> ...and there does need to be one.  If the counter is even, then the
> fields are meaningless and unstable.  The RQ_BUSY check is, I think,
> meant to check if the fields are meaningful, but they aren't meaningful
> until some time after RQ_BUSY is clear.
> 
> I would replace the "RQ_BUSY not set" test with "counter is even"

ack, I will fix it.

> 
> > 
> > Also, as above, I'm not sure atomic_read() is necessary here. Maybe
> > just READ_ONCE() ? Neil, any thoughts?
> 
> Agree - we don't need an atomic as there is a single writer.
> I think
>   smp_store_release(rqstp->counter, rqstp->counter|1)
> to increment it after parsing the request.  This makes it abundantly
> clear the value will be odd, and ensures that if another thread sees the
> 'odd' value, then it can also see the results of the parse.
> 
> To increment after processing the request,
>    smp_store_release(rqstp->counter, rqstp->counter + 1)
> 
> Then
>   counter = smp_load_acquire(rqstp->counter);
>   if ((counter & 1) == 0)
> to test if it is even before reading the state.  This ensure that if it
> sees "odd' it will see the results of the parse.
> 
> and
>   if ((smp_load_acquire(counter) == counter)  continue;
> 
> before trusting that the data we read was consistent.
> 
> Note that we "release" *after* something and "acquire" *before"
> something.
> I think it helps to always think about what the access is "before" or
> "after" when reasoning about barriers.
> checkpatch will want a comment before these acquire and release
> operation.  I recommend using the corresponding word "before" or "after"
> in that comment.


ack, I will add it.

Regards,
Lorenzo

> 
> NeilBrown
>
Chuck Lever Aug. 4, 2023, 2:02 p.m. UTC | #8
On Fri, Aug 04, 2023 at 09:56:59AM +0200, Lorenzo Bianconi wrote:
> [...]
> > > +	atomic_inc(&rqstp->rq_status_counter);
> > > +
> > 
> > Does this really have to be an atomic_t ? Seems like nfsd_dispatch
> > is the only function updating it. You might need release semantics
> > here and acquire semantics in nfsd_rpc_status_show(). I'd rather
> > avoid a full-on atomic op in nfsd_dispatch() unless it's absolutely
> > needed.
> 
> ack, I agree. I will work on it.
> 
> > 
> > Also, do you need to bump the rq_status_counter in the other RPC
> > dispatch routines (lockd and nfs callback) too?
> 
> the only consumer at the moment is nfsd, do you think we should add them in
> advance for lockd as well?

Ah... the reporting facility is added to the NFS server, not to
SunRPC. My mistake. I got confused because struct svc_rqst is an RPC
layer object.

So this is kind of a layering violation. The counter is for NFS
procedures, but it's being added to svc_rqst, which is an RPC layer
object. We're trying to fix up the places where NFSD stashes its
state in RPC structures. (I recall that Neil suggested this
arrangement).

But I don't have a better suggestion for where to put the counter.
Leave it where it is for now, and we'll come up with something
down the road ... or we'll decide we want the same watchdog for
all RPC services. ;-)


> > >  	rp = NULL;
> > >  	switch (nfsd_cache_lookup(rqstp, &rp)) {
> > >  	case RC_DOIT:
> > > @@ -1074,6 +1076,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
> > >  	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
> > >  		goto out_encode_err;
> > >  
> > > +	atomic_inc(&rqstp->rq_status_counter);
> > > +
> > >  	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
> > >  out_cached_reply:
> > >  	return 1;
> > > @@ -1149,3 +1153,121 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
> > >  	mutex_unlock(&nfsd_mutex);
> > >  	return ret;
> > >  }
> > > +
> > > +static int nfsd_rpc_status_show(struct seq_file *m, void *v)
> > > +{
> > > +	struct inode *inode = file_inode(m->file);
> > > +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > > +	int i;
> > > +
> > > +	rcu_read_lock();
> > > +
> > > +	for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
> > > +		struct svc_rqst *rqstp;
> > > +
> > > +		list_for_each_entry_rcu(rqstp,
> > > +				&nn->nfsd_serv->sv_pools[i].sp_all_threads,
> > > +				rq_all) {
> > > +			struct nfsd_rpc_status_info {
> > > +				struct sockaddr daddr;
> > > +				struct sockaddr saddr;
> > > +				unsigned long rq_flags;
> > > +				__be32 rq_xid;
> > > +				u32 rq_prog;
> > > +				u32 rq_vers;
> > > +				const char *pc_name;
> > > +				ktime_t rq_stime;
> > > +				u32 opnum[NFSD_MAX_OPS_PER_COMPOUND]; /* NFSv4 compund */
> > > +			} rqstp_info;
> > > +			unsigned int status_counter;
> > > +			char buf[RPC_MAX_ADDRBUFLEN];
> > > +			int j, opcnt = 0;
> > > +
> > > +			if (!test_bit(RQ_BUSY, &rqstp->rq_flags))
> > > +				continue;
> > > +
> > > +			status_counter = atomic_read(&rqstp->rq_status_counter);
> > 
> > Neil said:
> > 
> > > I suggest you add a counter to the rqstp which is incremented from
> > > even to odd after parsing a request - including the v4 parsing needed to
> > > have a stable ->opcnt - and then incremented from odd to even when the
> > > request is complete.
> > > Then this code samples the counter, skips the rqst if the counter is
> > > even, and resamples the counter after collecting the data.  If it has
> > > changed, then drop the record.
> > 
> > I don't see a check if the status counter is even.
> > 
> > Also, as above, I'm not sure atomic_read() is necessary here. Maybe
> > just READ_ONCE() ? Neil, any thoughts?
> 
> 
> I used the RQ_BUSY check instead of checking if the counter is even, but I can
> see the point now. I will fix it.
> 
> > 
> > 
> > > +
> > > +			rqstp_info.rq_xid = rqstp->rq_xid;
> > > +			rqstp_info.rq_flags = rqstp->rq_flags;
> > > +			rqstp_info.rq_prog = rqstp->rq_prog;
> > > +			rqstp_info.rq_vers = rqstp->rq_vers;
> > > +			rqstp_info.pc_name = svc_proc_name(rqstp);
> > > +			rqstp_info.rq_stime = rqstp->rq_stime;
> > > +			memcpy(&rqstp_info.daddr, svc_daddr(rqstp),
> > > +			       sizeof(struct sockaddr));
> > > +			memcpy(&rqstp_info.saddr, svc_addr(rqstp),
> > > +			       sizeof(struct sockaddr));
> > > +
> > > +#ifdef CONFIG_NFSD_V4
> > > +			if (rqstp->rq_vers == NFS4_VERSION &&
> > > +			    rqstp->rq_proc == NFSPROC4_COMPOUND) {
> > > +				/* NFSv4 compund */
> > > +				struct nfsd4_compoundargs *args = rqstp->rq_argp;
> > > +
> > > +				opcnt = args->opcnt;
> > > +				for (j = 0; j < opcnt; j++) {
> > > +					struct nfsd4_op *op = &args->ops[j];
> > > +
> > > +					rqstp_info.opnum[j] = op->opnum;
> > > +				}
> > > +			}
> > > +#endif /* CONFIG_NFSD_V4 */
> > > +
> > > +			/* In order to detect if the RPC request is pending and
> > > +			 * RPC info are stable we check if rq_status_counter
> > > +			 * has been incremented during the handler processing.
> > > +			 */
> > > +			if (status_counter != atomic_read(&rqstp->rq_status_counter))
> > > +				continue;
> > > +
> > > +			seq_printf(m,
> > > +				   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",
> > > +				   be32_to_cpu(rqstp_info.rq_xid),
> > > +				   rqstp_info.rq_flags,
> > > +				   rqstp_info.rq_prog,
> > > +				   rqstp_info.rq_vers,
> > > +				   rqstp_info.pc_name,
> > > +				   ktime_to_us(rqstp_info.rq_stime));
> > > +
> > > +			seq_printf(m, " %s,",
> > > +				   __svc_print_addr(&rqstp_info.saddr, buf,
> > > +						    sizeof(buf), false));
> > > +			seq_printf(m, " %s,",
> > > +				   __svc_print_addr(&rqstp_info.daddr, buf,
> > > +						    sizeof(buf), false));
> > > +			for (j = 0; j < opcnt; j++)
> > > +				seq_printf(m, " %s%s",
> > > +					   nfsd4_op_name(rqstp_info.opnum[j]),
> > > +					   j == opcnt - 1 ? "," : "");
> > > +			seq_puts(m, "\n");
> > > +		}
> > > +	}
> > > +
> > > +	rcu_read_unlock();
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +/**
> > > + * nfsd_rpc_status_open - Atomically copy a write verifier
> > 
> > The kdoc comment maybe was copied, pasted, and then not updated?
> 
> ack, I will fix it.
> 
> Regards,
> Lorenzo
> 
> > 
> > > + * @inode: entry inode pointer.
> > > + * @file: entry file pointer.
> > > + *
> > > + * This routine dumps pending RPC requests info queued into nfs server.
> > > + */
> > > +int nfsd_rpc_status_open(struct inode *inode, struct file *file)
> > > +{
> > > +	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > > +
> > > +	mutex_lock(&nfsd_mutex);
> > > +	if (!nn->nfsd_serv) {
> > > +		mutex_unlock(&nfsd_mutex);
> > > +		return -ENODEV;
> > > +	}
> > > +
> > > +	svc_get(nn->nfsd_serv);
> > > +	mutex_unlock(&nfsd_mutex);
> > > +
> > > +	return single_open(file, nfsd_rpc_status_show, inode->i_private);
> > > +}
> > > diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> > > index fe1394cc1371..cb516da9e270 100644
> > > --- a/include/linux/sunrpc/svc.h
> > > +++ b/include/linux/sunrpc/svc.h
> > > @@ -270,6 +270,7 @@ struct svc_rqst {
> > >  						 * net namespace
> > >  						 */
> > >  	void **			rq_lease_breaker; /* The v4 client breaking a lease */
> > > +	atomic_t		rq_status_counter; /* RPC processing counter */
> > >  };
> > >  
> > >  #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
> > > diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> > > index 587811a002c9..44eac83b35a1 100644
> > > --- a/net/sunrpc/svc.c
> > > +++ b/net/sunrpc/svc.c
> > > @@ -1629,7 +1629,7 @@ const char *svc_proc_name(const struct svc_rqst *rqstp)
> > >  		return rqstp->rq_procinfo->pc_name;
> > >  	return "unknown";
> > >  }
> > > -
> > > +EXPORT_SYMBOL_GPL(svc_proc_name);
> > >  
> > >  /**
> > >   * svc_encode_result_payload - mark a range of bytes as a result payload
> > > -- 
> > > 2.41.0
> > > 
> > 
> > -- 
> > Chuck Lever
> >
diff mbox series

Patch

diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f0f318e78630..b7ad3081bc36 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -2497,8 +2497,6 @@  static inline void nfsd4_increment_op_stats(u32 opnum)
 
 static const struct nfsd4_operation nfsd4_ops[];
 
-static const char *nfsd4_op_name(unsigned opnum);
-
 /*
  * Enforce NFSv4.1 COMPOUND ordering rules:
  *
@@ -3628,7 +3626,7 @@  void warn_on_nonidempotent_op(struct nfsd4_op *op)
 	}
 }
 
-static const char *nfsd4_op_name(unsigned opnum)
+const char *nfsd4_op_name(unsigned opnum)
 {
 	if (opnum < ARRAY_SIZE(nfsd4_ops))
 		return nfsd4_ops[opnum].op_name;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 35d2e2cde1eb..f2e4f4b1e4d1 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -57,6 +57,8 @@  enum {
 	NFSD_RecoveryDir,
 	NFSD_V4EndGrace,
 #endif
+	NFSD_Rpc_Status,
+
 	NFSD_MaxReserved
 };
 
@@ -195,6 +197,13 @@  static inline struct net *netns(struct file *file)
 	return file_inode(file)->i_sb->s_fs_info;
 }
 
+static const struct file_operations nfsd_rpc_status_operations = {
+	.open		= nfsd_rpc_status_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= nfsd_pool_stats_release,
+};
+
 /*
  * write_unlock_ip - Release all locks used by a client
  *
@@ -1400,6 +1409,7 @@  static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
 		[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
 		[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
 #endif
+		[NFSD_Rpc_Status] = {"rpc_status", &nfsd_rpc_status_operations, S_IRUGO},
 		/* last one */ {""}
 	};
 
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index d88498f8b275..75a3e1d55bc8 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -94,6 +94,7 @@  int		nfsd_get_nrthreads(int n, int *, struct net *);
 int		nfsd_set_nrthreads(int n, int *, struct net *);
 int		nfsd_pool_stats_open(struct inode *, struct file *);
 int		nfsd_pool_stats_release(struct inode *, struct file *);
+int		nfsd_rpc_status_open(struct inode *inode, struct file *file);
 void		nfsd_shutdown_threads(struct net *net);
 
 void		nfsd_put(struct net *net);
@@ -506,6 +507,7 @@  extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
 
 extern void nfsd4_init_leases_net(struct nfsd_net *nn);
 
+const char *nfsd4_op_name(unsigned opnum);
 #else /* CONFIG_NFSD_V4 */
 static inline int nfsd4_is_junction(struct dentry *dentry)
 {
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 97830e28c140..e9e954b5ae47 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -1057,6 +1057,8 @@  int nfsd_dispatch(struct svc_rqst *rqstp)
 	if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
 		goto out_decode_err;
 
+	atomic_inc(&rqstp->rq_status_counter);
+
 	rp = NULL;
 	switch (nfsd_cache_lookup(rqstp, &rp)) {
 	case RC_DOIT:
@@ -1074,6 +1076,8 @@  int nfsd_dispatch(struct svc_rqst *rqstp)
 	if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
 		goto out_encode_err;
 
+	atomic_inc(&rqstp->rq_status_counter);
+
 	nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
 out_cached_reply:
 	return 1;
@@ -1149,3 +1153,121 @@  int nfsd_pool_stats_release(struct inode *inode, struct file *file)
 	mutex_unlock(&nfsd_mutex);
 	return ret;
 }
+
+static int nfsd_rpc_status_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = file_inode(m->file);
+	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
+		struct svc_rqst *rqstp;
+
+		list_for_each_entry_rcu(rqstp,
+				&nn->nfsd_serv->sv_pools[i].sp_all_threads,
+				rq_all) {
+			struct nfsd_rpc_status_info {
+				struct sockaddr daddr;
+				struct sockaddr saddr;
+				unsigned long rq_flags;
+				__be32 rq_xid;
+				u32 rq_prog;
+				u32 rq_vers;
+				const char *pc_name;
+				ktime_t rq_stime;
+				u32 opnum[NFSD_MAX_OPS_PER_COMPOUND]; /* NFSv4 compound */
+			} rqstp_info;
+			unsigned int status_counter;
+			char buf[RPC_MAX_ADDRBUFLEN];
+			int j, opcnt = 0;
+
+			if (!test_bit(RQ_BUSY, &rqstp->rq_flags))
+				continue;
+
+			status_counter = atomic_read(&rqstp->rq_status_counter);
+
+			rqstp_info.rq_xid = rqstp->rq_xid;
+			rqstp_info.rq_flags = rqstp->rq_flags;
+			rqstp_info.rq_prog = rqstp->rq_prog;
+			rqstp_info.rq_vers = rqstp->rq_vers;
+			rqstp_info.pc_name = svc_proc_name(rqstp);
+			rqstp_info.rq_stime = rqstp->rq_stime;
+			memcpy(&rqstp_info.daddr, svc_daddr(rqstp),
+			       sizeof(struct sockaddr));
+			memcpy(&rqstp_info.saddr, svc_addr(rqstp),
+			       sizeof(struct sockaddr));
+
+#ifdef CONFIG_NFSD_V4
+			if (rqstp->rq_vers == NFS4_VERSION &&
+			    rqstp->rq_proc == NFSPROC4_COMPOUND) {
+				/* NFSv4 compound */
+				struct nfsd4_compoundargs *args = rqstp->rq_argp;
+
+				opcnt = args->opcnt;
+				for (j = 0; j < opcnt; j++) {
+					struct nfsd4_op *op = &args->ops[j];
+
+					rqstp_info.opnum[j] = op->opnum;
+				}
+			}
+#endif /* CONFIG_NFSD_V4 */
+
+			/* In order to detect if the RPC request is pending and
+			 * RPC info are stable we check if rq_status_counter
+			 * has been incremented during the handler processing.
+			 */
+			if (status_counter != atomic_read(&rqstp->rq_status_counter))
+				continue;
+
+			seq_printf(m,
+				   "0x%08x, 0x%08lx, 0x%08x, NFSv%d, %s, %016lld,",
+				   be32_to_cpu(rqstp_info.rq_xid),
+				   rqstp_info.rq_flags,
+				   rqstp_info.rq_prog,
+				   rqstp_info.rq_vers,
+				   rqstp_info.pc_name,
+				   ktime_to_us(rqstp_info.rq_stime));
+
+			seq_printf(m, " %s,",
+				   __svc_print_addr(&rqstp_info.saddr, buf,
+						    sizeof(buf), false));
+			seq_printf(m, " %s,",
+				   __svc_print_addr(&rqstp_info.daddr, buf,
+						    sizeof(buf), false));
+			for (j = 0; j < opcnt; j++)
+				seq_printf(m, " %s%s",
+					   nfsd4_op_name(rqstp_info.opnum[j]),
+					   j == opcnt - 1 ? "," : "");
+			seq_puts(m, "\n");
+		}
+	}
+
+	rcu_read_unlock();
+
+	return 0;
+}
+
+/**
+ * nfsd_rpc_status_open - Atomically copy a write verifier
+ * @inode: entry inode pointer.
+ * @file: entry file pointer.
+ *
+ * This routine dumps information about pending RPC requests queued in the NFS server.
+ */
+int nfsd_rpc_status_open(struct inode *inode, struct file *file)
+{
+	struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
+
+	mutex_lock(&nfsd_mutex);
+	if (!nn->nfsd_serv) {
+		mutex_unlock(&nfsd_mutex);
+		return -ENODEV;
+	}
+
+	svc_get(nn->nfsd_serv);
+	mutex_unlock(&nfsd_mutex);
+
+	return single_open(file, nfsd_rpc_status_show, inode->i_private);
+}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fe1394cc1371..cb516da9e270 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -270,6 +270,7 @@  struct svc_rqst {
 						 * net namespace
 						 */
 	void **			rq_lease_breaker; /* The v4 client breaking a lease */
+	atomic_t		rq_status_counter; /* RPC processing counter */
 };
 
 #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 587811a002c9..44eac83b35a1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1629,7 +1629,7 @@  const char *svc_proc_name(const struct svc_rqst *rqstp)
 		return rqstp->rq_procinfo->pc_name;
 	return "unknown";
 }
-
+EXPORT_SYMBOL_GPL(svc_proc_name);
 
 /**
  * svc_encode_result_payload - mark a range of bytes as a result payload