diff mbox series

[v2,1/3] SUNRPC query xprt switch for number of active transports

Message ID 20210609215319.5518-2-olga.kornievskaia@gmail.com (mailing list archive)
State New, archived
Headers show
Series don't collapse transports for the trunkable | expand

Commit Message

Olga Kornievskaia June 9, 2021, 9:53 p.m. UTC
From: Olga Kornievskaia <kolga@netapp.com>

To keep track of how many transports have already been added, add the
ability to query the number of active transports.

Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
---
 include/linux/sunrpc/clnt.h |  2 ++
 net/sunrpc/clnt.c           | 13 +++++++++++++
 2 files changed, 15 insertions(+)

Comments

Chuck Lever III June 10, 2021, 1:34 p.m. UTC | #1
> On Jun 9, 2021, at 5:53 PM, Olga Kornievskaia <olga.kornievskaia@gmail.com> wrote:
> 
> From: Olga Kornievskaia <kolga@netapp.com>
> 
> To keep track of how many transports have already been added, add
> ability to query the number.

Just a random thought: Would it make more sense to plug the
maximum allowed transports value into the struct rpc_clnt,
and then rpc_clnt_test_and_add_xprt() could prevent the
addition of the new xprt if the maximum would be exceeded?


> Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
> ---
> include/linux/sunrpc/clnt.h |  2 ++
> net/sunrpc/clnt.c           | 13 +++++++++++++
> 2 files changed, 15 insertions(+)
> 
> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
> index 02e7a5863d28..27042f1e581f 100644
> --- a/include/linux/sunrpc/clnt.h
> +++ b/include/linux/sunrpc/clnt.h
> @@ -234,6 +234,8 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
> void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
> bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
> 			const struct sockaddr *sap);
> +size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *);
> +
> void rpc_cleanup_clids(void);
> 
> static inline int rpc_reply_expected(struct rpc_task *task)
> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
> index 42623d6b8f0e..b46262ffcf72 100644
> --- a/net/sunrpc/clnt.c
> +++ b/net/sunrpc/clnt.c
> @@ -2959,6 +2959,19 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
> }
> EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
> 
> +size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *clnt)
> +{
> +	struct rpc_xprt_switch *xps;
> +	size_t num;
> +
> +	rcu_read_lock();
> +	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
> +	num = xps->xps_nactive;
> +	rcu_read_unlock();
> +	return num;
> +}
> +EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_nactive);
> +
> #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
> static void rpc_show_header(void)
> {
> -- 
> 2.27.0
> 

--
Chuck Lever
Olga Kornievskaia June 10, 2021, 2:50 p.m. UTC | #2
On Thu, Jun 10, 2021 at 9:34 AM Chuck Lever III <chuck.lever@oracle.com> wrote:
>
>
>
> > On Jun 9, 2021, at 5:53 PM, Olga Kornievskaia <olga.kornievskaia@gmail.com> wrote:
> >
> > From: Olga Kornievskaia <kolga@netapp.com>
> >
> > To keep track of how many transports have already been added, add
> > ability to query the number.
>
> Just a random thought: Would it make more sense to plug the
> maximum allowed transports value into the struct rpc_clnt,
> and then rpc_clnt_test_and_add_xprt() could prevent the
> addition of the new xprt if the maximum would be exceeded?

Sure, that could be done. Then the value of the maximum allowed
transports should be defined at the RPC layer and not in NFS? I
currently check the upper bound while parsing the mount option — would
I stop doing that, or instead expose the RPC-layer value to the NFS
layer?

Actually, I think it would be nice to add some kind of warning to the
log that a trunking transport wasn't created because the limit was
reached, but if we move things to the RPC layer, we can't distinguish
between nconnect and trunking transports.

> > Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
> > ---
> > include/linux/sunrpc/clnt.h |  2 ++
> > net/sunrpc/clnt.c           | 13 +++++++++++++
> > 2 files changed, 15 insertions(+)
> >
> > diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
> > index 02e7a5863d28..27042f1e581f 100644
> > --- a/include/linux/sunrpc/clnt.h
> > +++ b/include/linux/sunrpc/clnt.h
> > @@ -234,6 +234,8 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
> > void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
> > bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
> >                       const struct sockaddr *sap);
> > +size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *);
> > +
> > void rpc_cleanup_clids(void);
> >
> > static inline int rpc_reply_expected(struct rpc_task *task)
> > diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
> > index 42623d6b8f0e..b46262ffcf72 100644
> > --- a/net/sunrpc/clnt.c
> > +++ b/net/sunrpc/clnt.c
> > @@ -2959,6 +2959,19 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
> > }
> > EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
> >
> > +size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *clnt)
> > +{
> > +     struct rpc_xprt_switch *xps;
> > +     size_t num;
> > +
> > +     rcu_read_lock();
> > +     xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
> > +     num = xps->xps_nactive;
> > +     rcu_read_unlock();
> > +     return num;
> > +}
> > +EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_nactive);
> > +
> > #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
> > static void rpc_show_header(void)
> > {
> > --
> > 2.27.0
> >
>
> --
> Chuck Lever
>
>
>
Chuck Lever III June 10, 2021, 2:55 p.m. UTC | #3
> On Jun 10, 2021, at 10:50 AM, Olga Kornievskaia <olga.kornievskaia@gmail.com> wrote:
> 
> On Thu, Jun 10, 2021 at 9:34 AM Chuck Lever III <chuck.lever@oracle.com> wrote:
>> 
>> 
>> 
>>> On Jun 9, 2021, at 5:53 PM, Olga Kornievskaia <olga.kornievskaia@gmail.com> wrote:
>>> 
>>> From: Olga Kornievskaia <kolga@netapp.com>
>>> 
>>> To keep track of how many transports have already been added, add
>>> ability to query the number.
>> 
>> Just a random thought: Would it make more sense to plug the
>> maximum allowed transports value into the struct rpc_clnt,
>> and then rpc_clnt_test_and_add_xprt() could prevent the
>> addition of the new xprt if the maximum would be exceeded?
> 
> Sure that could be done. Then the value of maximum allowed transports
> should be defined at the RPC layer and not NFS?

The limits are defined by the upper layer (NFS) and enforced
by the RPC client.


> I currently check for
> upper bounds during the parsing of the mount option, would I be not
> doing that or exposing the RPC value to the NFS layer?


> Actually I think it might be nice to add some kind of warning in the
> log that a trunking transport wasn't created because the limit was
> reached but if we move things to the RPC, we can't distinguish between
> nconnect and trunking transports.

One or two new tracepoints might help in any case. I wouldn't
say admins need a log message, but someone debugging something
might want one.


>>> Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
>>> ---
>>> include/linux/sunrpc/clnt.h |  2 ++
>>> net/sunrpc/clnt.c           | 13 +++++++++++++
>>> 2 files changed, 15 insertions(+)
>>> 
>>> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
>>> index 02e7a5863d28..27042f1e581f 100644
>>> --- a/include/linux/sunrpc/clnt.h
>>> +++ b/include/linux/sunrpc/clnt.h
>>> @@ -234,6 +234,8 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
>>> void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
>>> bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
>>>                      const struct sockaddr *sap);
>>> +size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *);
>>> +
>>> void rpc_cleanup_clids(void);
>>> 
>>> static inline int rpc_reply_expected(struct rpc_task *task)
>>> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
>>> index 42623d6b8f0e..b46262ffcf72 100644
>>> --- a/net/sunrpc/clnt.c
>>> +++ b/net/sunrpc/clnt.c
>>> @@ -2959,6 +2959,19 @@ bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
>>> }
>>> EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
>>> 
>>> +size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *clnt)
>>> +{
>>> +     struct rpc_xprt_switch *xps;
>>> +     size_t num;
>>> +
>>> +     rcu_read_lock();
>>> +     xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
>>> +     num = xps->xps_nactive;
>>> +     rcu_read_unlock();
>>> +     return num;
>>> +}
>>> +EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_nactive);
>>> +
>>> #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
>>> static void rpc_show_header(void)
>>> {
>>> --
>>> 2.27.0
>>> 
>> 
>> --
>> Chuck Lever

--
Chuck Lever
diff mbox series

Patch

diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 02e7a5863d28..27042f1e581f 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -234,6 +234,8 @@  void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
 			const struct sockaddr *sap);
+size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *);
+
 void rpc_cleanup_clids(void);
 
 static inline int rpc_reply_expected(struct rpc_task *task)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 42623d6b8f0e..b46262ffcf72 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2959,6 +2959,19 @@  bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
 }
 EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr);
 
+size_t rpc_clnt_xprt_switch_nactive(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt_switch *xps;
+	size_t num;
+
+	rcu_read_lock();
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+	num = xps->xps_nactive;
+	rcu_read_unlock();
+	return num;
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_nactive);
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 static void rpc_show_header(void)
 {