
[3/3] SUNRPC: Allow dynamic allocation of back channel slots

Message ID: 20190304001928.4397-3-trond.myklebust@hammerspace.com (mailing list archive)
State: New, archived
Series: None

Commit Message

Trond Myklebust March 4, 2019, 12:19 a.m. UTC
Now that the reads happen in a process context rather than a softirq,
it is safe to allocate back channel slots using a reclaiming
allocation.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
---
 net/sunrpc/backchannel_rqst.c | 41 +++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 16 deletions(-)

Comments

Chuck Lever III March 4, 2019, 3:26 p.m. UTC | #1
Hi Trond-


> On Mar 3, 2019, at 7:19 PM, Trond Myklebust <trondmy@gmail.com> wrote:
> 
> Now that the reads happen in a process context rather than a softirq,
> it is safe to allocate back channel slots using a reclaiming
> allocation.

Is this a required change for transports, or simply an optimization?
Wondering if a similar change is needed for RPC-over-RDMA.


> Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
> ---
> net/sunrpc/backchannel_rqst.c | 41 +++++++++++++++++++++--------------
> 1 file changed, 25 insertions(+), 16 deletions(-)
> 
> diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
> index b9313c15ee3a..c47d82622fd1 100644
> --- a/net/sunrpc/backchannel_rqst.c
> +++ b/net/sunrpc/backchannel_rqst.c
> @@ -235,7 +235,8 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
> 		list_empty(&xprt->bc_pa_list) ? "true" : "false");
> }
> 
> -static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
> +static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
> +		struct rpc_rqst *new)
> {
> 	struct rpc_rqst *req = NULL;
> 
> @@ -243,10 +244,9 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
> 	if (atomic_read(&xprt->bc_free_slots) <= 0)
> 		goto not_found;
> 	if (list_empty(&xprt->bc_pa_list)) {
> -		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
> -		if (!req)
> +		if (!new)
> 			goto not_found;
> -		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
> +		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
> 		xprt->bc_alloc_count++;
> 	}
> 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
> @@ -256,8 +256,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
> 			sizeof(req->rq_private_buf));
> 	req->rq_xid = xid;
> 	req->rq_connect_cookie = xprt->connect_cookie;
> -not_found:
> 	dprintk("RPC:       backchannel req=%p\n", req);
> +not_found:
> 	return req;
> }
> 
> @@ -320,18 +320,27 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
>  */
> struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
> {
> -	struct rpc_rqst *req;
> -
> -	spin_lock(&xprt->bc_pa_lock);
> -	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
> -		if (req->rq_connect_cookie != xprt->connect_cookie)
> -			continue;
> -		if (req->rq_xid == xid)
> -			goto found;
> -	}
> -	req = xprt_alloc_bc_request(xprt, xid);
> +	struct rpc_rqst *req, *new = NULL;
> +
> +	do {
> +		spin_lock(&xprt->bc_pa_lock);
> +		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
> +			if (req->rq_connect_cookie != xprt->connect_cookie)
> +				continue;
> +			if (req->rq_xid == xid)
> +				goto found;
> +		}
> +		req = xprt_get_bc_request(xprt, xid, new);
> found:
> -	spin_unlock(&xprt->bc_pa_lock);
> +		spin_unlock(&xprt->bc_pa_lock);
> +		if (new) {
> +			if (req != new)
> +				xprt_free_bc_rqst(new);
> +			break;
> +		} else if (req)
> +			break;
> +		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
> +	} while (new);
> 	return req;
> }
> 
> -- 
> 2.20.1
> 

--
Chuck Lever
Trond Myklebust March 4, 2019, 3:52 p.m. UTC | #2
On Mon, 2019-03-04 at 10:26 -0500, Chuck Lever wrote:
> Hi Trond-
> 
> 
> > On Mar 3, 2019, at 7:19 PM, Trond Myklebust <trondmy@gmail.com>
> > wrote:
> > 
> > Now that the reads happen in a process context rather than a
> > softirq,
> > it is safe to allocate back channel slots using a reclaiming
> > allocation.
> 
> Is this a required change for transports, or simply an optimization?
> Wondering if a similar change is needed for RPC-over-RDMA.

Now that we no longer need to be atomic for the socket receive code, I
figure we can afford the call. The alternative is that we have to drop
the connection, which is also very costly.

Cheers
  Trond
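
The cost argument above is the heart of the change: in softirq context only a non-sleeping GFP_ATOMIC allocation is legal, and when it fails under memory pressure the only fallback is to drop the connection, whereas in process context a GFP_KERNEL allocation may sleep and let the VM reclaim, so it almost always succeeds. A kernel-style sketch of that contrast (illustrative only, not part of the patch):

#include <linux/slab.h>

/* Old model: the receive path ran in softirq context, so only a
 * non-sleeping allocation was possible, and a failure left the caller
 * no choice but to tear down the connection. */
static void *softirq_safe_alloc(size_t len)
{
	return kmalloc(len, GFP_ATOMIC);
}

/* New model: the receive path runs in process context, so the allocator
 * may block and reclaim memory instead of failing outright. */
static void *process_ctx_alloc(size_t len)
{
	return kmalloc(len, GFP_KERNEL);
}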

Patch

diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index b9313c15ee3a..c47d82622fd1 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -235,7 +235,8 @@  void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
 		list_empty(&xprt->bc_pa_list) ? "true" : "false");
 }
 
-static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
+static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
+		struct rpc_rqst *new)
 {
 	struct rpc_rqst *req = NULL;
 
@@ -243,10 +244,9 @@  static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 	if (atomic_read(&xprt->bc_free_slots) <= 0)
 		goto not_found;
 	if (list_empty(&xprt->bc_pa_list)) {
-		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
-		if (!req)
+		if (!new)
 			goto not_found;
-		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
 		xprt->bc_alloc_count++;
 	}
 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
@@ -256,8 +256,8 @@  static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 			sizeof(req->rq_private_buf));
 	req->rq_xid = xid;
 	req->rq_connect_cookie = xprt->connect_cookie;
-not_found:
 	dprintk("RPC:       backchannel req=%p\n", req);
+not_found:
 	return req;
 }
 
@@ -320,18 +320,27 @@  void xprt_free_bc_rqst(struct rpc_rqst *req)
  */
 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
 {
-	struct rpc_rqst *req;
-
-	spin_lock(&xprt->bc_pa_lock);
-	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
-		if (req->rq_connect_cookie != xprt->connect_cookie)
-			continue;
-		if (req->rq_xid == xid)
-			goto found;
-	}
-	req = xprt_alloc_bc_request(xprt, xid);
+	struct rpc_rqst *req, *new = NULL;
+
+	do {
+		spin_lock(&xprt->bc_pa_lock);
+		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+			if (req->rq_connect_cookie != xprt->connect_cookie)
+				continue;
+			if (req->rq_xid == xid)
+				goto found;
+		}
+		req = xprt_get_bc_request(xprt, xid, new);
 found:
-	spin_unlock(&xprt->bc_pa_lock);
+		spin_unlock(&xprt->bc_pa_lock);
+		if (new) {
+			if (req != new)
+				xprt_free_bc_rqst(new);
+			break;
+		} else if (req)
+			break;
+		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
+	} while (new);
 	return req;
 }