
[4/5] NFSD/SUNRPC: Check rpc_xprt out of xs_setup_bc_tcp

Message ID 52CE7AE6.4010603@gmail.com (mailing list archive)
State New, archived

Commit Message

Kinglong Mee Jan. 9, 2014, 10:33 a.m. UTC
Besides moving the rpc_xprt check out of xs_setup_bc_tcp,
increase its reference count (this is important).

Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
---
 fs/nfsd/nfs4callback.c      | 19 ++++++++++++++++++-
 include/linux/sunrpc/xprt.h | 13 ++++++++++++-
 net/sunrpc/xprt.c           | 12 ------------
 net/sunrpc/xprtsock.c       |  9 ---------
 4 files changed, 30 insertions(+), 23 deletions(-)

Comments

J. Bruce Fields Jan. 9, 2014, 4:26 p.m. UTC | #1
On Thu, Jan 09, 2014 at 06:33:10PM +0800, Kinglong Mee wrote:
> Besides moving the rpc_xprt check out of xs_setup_bc_tcp,
> increase its reference count (this is important).

This sounds wrong to me: the presence of a backchannel can't prevent the
client's connection from going away.  Instead, when the connection dies,
any associated backchannels should be immediately destroyed.

--b.

> 
> Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
> ---
>  fs/nfsd/nfs4callback.c      | 19 ++++++++++++++++++-
>  include/linux/sunrpc/xprt.h | 13 ++++++++++++-
>  net/sunrpc/xprt.c           | 12 ------------
>  net/sunrpc/xprtsock.c       |  9 ---------
>  4 files changed, 30 insertions(+), 23 deletions(-)
> 
> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> index 7f05cd1..39c8ef8 100644
> --- a/fs/nfsd/nfs4callback.c
> +++ b/fs/nfsd/nfs4callback.c
> @@ -32,6 +32,7 @@
>   */
>  
>  #include <linux/sunrpc/clnt.h>
> +#include <linux/sunrpc/xprt.h>
>  #include <linux/sunrpc/svc_xprt.h>
>  #include <linux/slab.h>
>  #include "nfsd.h"
> @@ -635,6 +636,22 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
>  	}
>  }
>  
> +static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
> +{
> +	struct rpc_xprt *xprt;
> +
> +	if (args->protocol != XPRT_TRANSPORT_BC_TCP)
> +		return rpc_create(args);
> +
> +	xprt = args->bc_xprt->xpt_bc_xprt;
> +	if (xprt) {
> +		xprt_get(xprt);
> +		return rpc_create_xprt(args, xprt);
> +	}
> +
> +	return rpc_create(args);
> +}
> +
>  static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
>  {
>  	struct rpc_timeout	timeparms = {
> @@ -674,7 +691,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
>  		args.authflavor = ses->se_cb_sec.flavor;
>  	}
>  	/* Create RPC client */
> -	client = rpc_create(&args);
> +	client = create_backchannel_client(&args);
>  	if (IS_ERR(client)) {
>  		dprintk("NFSD: couldn't create callback client: %ld\n",
>  			PTR_ERR(client));
> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
> index 8097b9d..3e5efb2 100644
> --- a/include/linux/sunrpc/xprt.h
> +++ b/include/linux/sunrpc/xprt.h
> @@ -295,13 +295,24 @@ int			xprt_adjust_timeout(struct rpc_rqst *req);
>  void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
>  void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
>  void			xprt_release(struct rpc_task *task);
> -struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
>  void			xprt_put(struct rpc_xprt *xprt);
>  struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
>  				unsigned int num_prealloc,
>  				unsigned int max_req);
>  void			xprt_free(struct rpc_xprt *);
>  
> +/**
> + * xprt_get - return a reference to an RPC transport.
> + * @xprt: pointer to the transport
> + *
> + */
> +static inline struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
> +{
> +	if (atomic_inc_not_zero(&xprt->count))
> +		return xprt;
> +	return NULL;
> +}
> +
>  static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
>  {
>  	return p + xprt->tsh_size;
> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> index ddd198e..b0a1bbb 100644
> --- a/net/sunrpc/xprt.c
> +++ b/net/sunrpc/xprt.c
> @@ -1383,15 +1383,3 @@ void xprt_put(struct rpc_xprt *xprt)
>  	if (atomic_dec_and_test(&xprt->count))
>  		xprt_destroy(xprt);
>  }
> -
> -/**
> - * xprt_get - return a reference to an RPC transport.
> - * @xprt: pointer to the transport
> - *
> - */
> -struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
> -{
> -	if (atomic_inc_not_zero(&xprt->count))
> -		return xprt;
> -	return NULL;
> -}
> diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
> index 7289e3c..37ea15a 100644
> --- a/net/sunrpc/xprtsock.c
> +++ b/net/sunrpc/xprtsock.c
> @@ -2923,15 +2923,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
>  	struct svc_sock *bc_sock;
>  	struct rpc_xprt *ret;
>  
> -	if (args->bc_xprt->xpt_bc_xprt) {
> -		/*
> -		 * This server connection already has a backchannel
> -		 * transport; we can't create a new one, as we wouldn't
> -		 * be able to match replies based on xid any more.  So,
> -		 * reuse the already-existing one:
> -		 */
> -		 return args->bc_xprt->xpt_bc_xprt;
> -	}
>  	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
>  			xprt_tcp_slot_table_entries);
>  	if (IS_ERR(xprt))
> -- 
> 1.8.4.2
Trond Myklebust Jan. 9, 2014, 5:27 p.m. UTC | #2
On Jan 9, 2014, at 11:26, J. Bruce Fields <bfields@fieldses.org> wrote:

> On Thu, Jan 09, 2014 at 06:33:10PM +0800, Kinglong Mee wrote:
>> Besides moving the rpc_xprt check out of xs_setup_bc_tcp,
>> increase its reference count (this is important).
> 
> This sounds wrong to me: the presence of a backchannel can't prevent the
> client's connection from going away.  Instead, when the connection dies,
> any associated backchannels should be immediately destroyed.

Hi Bruce,

The right way to deal with this is to have knfsd shut down the rpc_client when it detects the TCP disconnection event. The xprt->count shouldn’t be an issue here: it has nothing to do with the socket connection state.

Cheers
  Trond
Kinglong Mee Jan. 10, 2014, 2:41 a.m. UTC | #3
On 01/10/2014 01:27 AM, Trond Myklebust wrote:
> 
> On Jan 9, 2014, at 11:26, J. Bruce Fields <bfields@fieldses.org> wrote:
> 
>> On Thu, Jan 09, 2014 at 06:33:10PM +0800, Kinglong Mee wrote:
>>> Besides moving the rpc_xprt check out of xs_setup_bc_tcp,
>>> increase its reference count (this is important).
>>
>> This sounds wrong to me: the presence of a backchannel can't prevent the
>> client's connection from going away.  Instead, when the connection dies,
>> any associated backchannels should be immediately destroyed.
> 
> Hi Bruce,
> 
> The right way to deal with this is to have knfsd shut down the rpc_client
> when it detects the TCP disconnection event. 

Yes, that's right.
Knfsd already does this, as you said.

When svc_recv/svc_handle_xprt sees that the xprt has XPT_CLOSE set,
knfsd deletes the xprt via svc_delete_xprt.

svc_delete_xprt calls call_xpt_users to notify the users of the xprt,
so nfsd4_conn_lost is called to free the connection.
After the connection is freed, nfsd4_probe_callback updates the
callback for the client, and clp->cl_cb_client is shut down in
nfsd4_process_cb_update.

In the end, all references to the xprt are released.
I have tested this and it works.
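
Roughly, the notification hook in net/sunrpc/svc_xprt.c looks like the
sketch below (paraphrased from memory of that era's tree, so the
details may differ):

static void call_xpt_users(struct svc_xprt *xprt)
{
        struct svc_xpt_user *u;

        spin_lock(&xprt->xpt_lock);
        while (!list_empty(&xprt->xpt_users)) {
                u = list_first_entry(&xprt->xpt_users,
                                struct svc_xpt_user, list);
                list_del(&u->list);
                /* For a backchannel connection the callback is
                 * nfsd4_conn_lost(), which frees the nfsd4_conn and
                 * calls nfsd4_probe_callback(). */
                u->callback(u);
        }
        spin_unlock(&xprt->xpt_lock);
}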

> The xprt->count shouldn’t be an issue here: 
> it has nothing to do with the socket connection state.

The backchannel xprt holds two references:
1. rpc_clnt
2. svc_xprt

For rpc_clnt, xprt->count is initialized in xs_setup_bc_tcp, or
increased in create_backchannel_client, and decreased in rpc_free_client.

For svc_xprt, it is increased in xs_setup_bc_tcp and decreased in
svc_xprt_free.
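
To make the pairing explicit, here is a summary in comment form (not
code from the actual tree, just the intended lifetime under this
patchset):

/*
 * Backchannel xprt reference pairing under this patchset:
 *
 * rpc_clnt ref: xprt->count initialized in xs_setup_bc_tcp(), or
 *               taken via xprt_get() in create_backchannel_client()
 *               when an existing xpt_bc_xprt is reused;
 *               dropped via xprt_put() in rpc_free_client().
 *
 * svc_xprt ref: taken in xs_setup_bc_tcp() when the new xprt is
 *               stored in args->bc_xprt->xpt_bc_xprt;
 *               dropped via xprt_put() in svc_xprt_free().
 */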

thanks,
Kinglong Mee

J. Bruce Fields Jan. 27, 2014, 11:08 p.m. UTC | #4
Apologies, I completely dropped this and now I've lost the thread of the
discussion.

What is your most recent patchset?

--b.

On Fri, Jan 10, 2014 at 10:41:24AM +0800, Kinglong Mee wrote:
> On 01/10/2014 01:27 AM, Trond Myklebust wrote:
> > 
> > On Jan 9, 2014, at 11:26, J. Bruce Fields <bfields@fieldses.org> wrote:
> > 
> >> On Thu, Jan 09, 2014 at 06:33:10PM +0800, Kinglong Mee wrote:
> >>> Besides moving the rpc_xprt check out of xs_setup_bc_tcp,
> >>> increase its reference count (this is important).
> >>
> >> This sounds wrong to me: the presence of a backchannel can't prevent the
> >> client's connection from going away.  Instead, when the connection dies,
> >> any associated backchannels should be immediately destroyed.
> > 
> > Hi Bruce,
> > 
> > The right way to deal with this is to have knfsd shut down the rpc_client
> > when it detects the TCP disconnection event. 
> 
> Yes, that's right.
> Knfsd already does this, as you said.
> 
> When svc_recv/svc_handle_xprt sees that the xprt has XPT_CLOSE set,
> knfsd deletes the xprt via svc_delete_xprt.
> 
> svc_delete_xprt calls call_xpt_users to notify the users of the xprt,
> so nfsd4_conn_lost is called to free the connection.
> After the connection is freed, nfsd4_probe_callback updates the
> callback for the client, and clp->cl_cb_client is shut down in
> nfsd4_process_cb_update.
> 
> In the end, all references to the xprt are released.
> I have tested this and it works.
> 
> > The xprt->count shouldn’t be an issue here: 
> > it has nothing to do with the socket connection state.
> 
> The backchannel xprt holds two references:
> 1. rpc_clnt
> 2. svc_xprt
> 
> For rpc_clnt, xprt->count is initialized in xs_setup_bc_tcp, or
> increased in create_backchannel_client, and decreased in rpc_free_client.
> 
> For svc_xprt, it is increased in xs_setup_bc_tcp and decreased in
> svc_xprt_free.
> 
> thanks,
> Kinglong Mee
> 
Kinglong Mee Feb. 11, 2014, 12:08 p.m. UTC | #5
Dear Bruce,

I am sorry for replying so late.
I will resend those patches.

thanks,
Kinglong Mee

On Tue, Jan 28, 2014 at 7:08 AM, J. Bruce Fields
<bfields@fieldses.org> wrote:
> Apologies, I completely dropped this and now I've lost the thread of the
> discussion.
>
> What is your most recent patchset?
>
> --b.
>
> On Fri, Jan 10, 2014 at 10:41:24AM +0800, Kinglong Mee wrote:
>> On 01/10/2014 01:27 AM, Trond Myklebust wrote:
>> >
>> > On Jan 9, 2014, at 11:26, J. Bruce Fields <bfields@fieldses.org> wrote:
>> >
>> >> On Thu, Jan 09, 2014 at 06:33:10PM +0800, Kinglong Mee wrote:
>> >>> Besides moving the rpc_xprt check out of xs_setup_bc_tcp,
>> >>> increase its reference count (this is important).
>> >>
>> >> This sounds wrong to me: the presence of a backchannel can't prevent the
>> >> client's connection from going away.  Instead, when the connection dies,
>> >> any associated backchannels should be immediately destroyed.
>> >
>> > Hi Bruce,
>> >
>> > The right way to deal with this is to have knfsd shut down the rpc_client
>> > when it detects the TCP disconnection event.
>>
>> Yes, that's right.
>> Knfsd already does this, as you said.
>>
>> When svc_recv/svc_handle_xprt sees that the xprt has XPT_CLOSE set,
>> knfsd deletes the xprt via svc_delete_xprt.
>>
>> svc_delete_xprt calls call_xpt_users to notify the users of the xprt,
>> so nfsd4_conn_lost is called to free the connection.
>> After the connection is freed, nfsd4_probe_callback updates the
>> callback for the client, and clp->cl_cb_client is shut down in
>> nfsd4_process_cb_update.
>>
>> In the end, all references to the xprt are released.
>> I have tested this and it works.
>>
>> > The xprt->count shouldn't be an issue here:
>> > it has nothing to do with the socket connection state.
>>
>> The backchannel xprt holds two references:
>> 1. rpc_clnt
>> 2. svc_xprt
>>
>> For rpc_clnt, xprt->count is initialized in xs_setup_bc_tcp, or
>> increased in create_backchannel_client, and decreased in rpc_free_client.
>>
>> For svc_xprt, it is increased in xs_setup_bc_tcp and decreased in
>> svc_xprt_free.
>>
>> thanks,
>> Kinglong Mee
>>

Patch

diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7f05cd1..39c8ef8 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -32,6 +32,7 @@ 
  */
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/xprt.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/slab.h>
 #include "nfsd.h"
@@ -635,6 +636,22 @@  static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
 	}
 }
 
+static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
+{
+	struct rpc_xprt *xprt;
+
+	if (args->protocol != XPRT_TRANSPORT_BC_TCP)
+		return rpc_create(args);
+
+	xprt = args->bc_xprt->xpt_bc_xprt;
+	if (xprt) {
+		xprt_get(xprt);
+		return rpc_create_xprt(args, xprt);
+	}
+
+	return rpc_create(args);
+}
+
 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
 	struct rpc_timeout	timeparms = {
@@ -674,7 +691,7 @@  static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
 		args.authflavor = ses->se_cb_sec.flavor;
 	}
 	/* Create RPC client */
-	client = rpc_create(&args);
+	client = create_backchannel_client(&args);
 	if (IS_ERR(client)) {
 		dprintk("NFSD: couldn't create callback client: %ld\n",
 			PTR_ERR(client));
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 8097b9d..3e5efb2 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -295,13 +295,24 @@  int			xprt_adjust_timeout(struct rpc_rqst *req);
 void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void			xprt_release(struct rpc_task *task);
-struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
 void			xprt_put(struct rpc_xprt *xprt);
 struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
 				unsigned int num_prealloc,
 				unsigned int max_req);
 void			xprt_free(struct rpc_xprt *);
 
+/**
+ * xprt_get - return a reference to an RPC transport.
+ * @xprt: pointer to the transport
+ *
+ */
+static inline struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
+{
+	if (atomic_inc_not_zero(&xprt->count))
+		return xprt;
+	return NULL;
+}
+
 static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
 {
 	return p + xprt->tsh_size;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ddd198e..b0a1bbb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1383,15 +1383,3 @@  void xprt_put(struct rpc_xprt *xprt)
 	if (atomic_dec_and_test(&xprt->count))
 		xprt_destroy(xprt);
 }
-
-/**
- * xprt_get - return a reference to an RPC transport.
- * @xprt: pointer to the transport
- *
- */
-struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
-{
-	if (atomic_inc_not_zero(&xprt->count))
-		return xprt;
-	return NULL;
-}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7289e3c..37ea15a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2923,15 +2923,6 @@  static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct svc_sock *bc_sock;
 	struct rpc_xprt *ret;
 
-	if (args->bc_xprt->xpt_bc_xprt) {
-		/*
-		 * This server connection already has a backchannel
-		 * transport; we can't create a new one, as we wouldn't
-		 * be able to match replies based on xid any more.  So,
-		 * reuse the already-existing one:
-		 */
-		 return args->bc_xprt->xpt_bc_xprt;
-	}
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
 			xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))