[RFC,7/9] svcrdma: Post RDMA Writes while XDR encoding replies

Message ID 20200214155019.3848.58561.stgit@klimt.1015granger.net
State Not Applicable
Series Address bugzilla 198053 and more ...

Commit Message

Chuck Lever III Feb. 14, 2020, 3:50 p.m. UTC
The only RPC/RDMA ordering requirement between RDMA Writes and RDMA
Sends is that the Writes have to be posted before the Send that
carries the RPC Reply for that Write payload.

The Linux NFS server implementation now has a transport method
through which the RDMA Writes for a READ payload can be posted
earlier than svc_rdma_sendto:

   ->xpo_read_payload.
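
As context, the generic server code calls this method while the upper
layer encodes a READ result. The following is a minimal sketch of such
a call site; the helper name and exact signature are illustrative, not
taken from this series:

    /* Sketch only: generic svc code invoking the transport's
     * read-payload method while the reply is being XDR-encoded.
     * Socket transports would simply return zero from their
     * ->xpo_read_payload.
     */
    static inline int svc_encode_read_payload(struct svc_rqst *rqstp,
                                              unsigned int offset,
                                              unsigned int length)
    {
            struct svc_xprt *xprt = rqstp->rq_xprt;

            return xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
    }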

Goals:
- Get RDMA Writes going earlier so they are more likely to be
  complete at the remote end before the Send completes.
- Allow more parallelism when dispatching RDMA operations by
  posting RDMA Writes before taking xpt_mutex (see the sketch below).
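
The second goal matters because the generic send path serializes every
reply on xpt_mutex, so work posted before that point does not queue
behind other replies on the same connection. A simplified sketch of
that path (error handling and length accounting elided):

    /* Simplified sketch of the generic reply path: the transport's
     * ->xpo_sendto, and therefore the RDMA Send carrying the Reply,
     * runs under xpt_mutex. RDMA Writes posted earlier via
     * ->xpo_read_payload stay outside this per-connection critical
     * section.
     */
    int svc_send(struct svc_rqst *rqstp)
    {
            struct svc_xprt *xprt = rqstp->rq_xprt;
            int len;

            mutex_lock(&xprt->xpt_mutex);
            len = xprt->xpt_ops->xpo_sendto(rqstp);
            mutex_unlock(&xprt->xpt_mutex);

            return len;
    }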

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/svc_rdma_sendto.c |   26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

Patch

diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 3c0e41d378bc..273453a336b0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -843,15 +843,9 @@  int svc_rdma_sendto(struct svc_rqst *rqstp)
 	*p++ = xdr_zero;
 	*p   = xdr_zero;
 
-	if (wr_lst) {
-		/* XXX: Presume the client sent only one Write chunk */
-		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr,
-						rctxt->rc_read_payload_offset,
-						rctxt->rc_read_payload_length);
-		if (ret < 0)
-			goto err2;
-		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
-	}
+	if (wr_lst)
+		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst,
+					       rctxt->rc_read_payload_length);
 	if (rp_ch) {
 		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
 		if (ret < 0)
@@ -896,16 +890,16 @@  int svc_rdma_sendto(struct svc_rqst *rqstp)
  * @offset: payload's byte offset in @xdr
  * @length: size of payload, in bytes
  *
- * Returns zero on success.
- *
- * For the moment, just record the xdr_buf location of the READ
- * payload. svc_rdma_sendto will use that location later when
- * we actually send the payload.
+ * Returns zero on success, or a negative errno.
  */
 int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
 			  unsigned int length)
 {
 	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
+	struct svcxprt_rdma *rdma;
+
+	if (!rctxt->rc_write_list)
+		return 0;
 
 	/* XXX: Just one READ payload slot for now, since our
 	 * transport implementation currently supports only one
@@ -914,5 +908,7 @@  int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
 	rctxt->rc_read_payload_offset = offset;
 	rctxt->rc_read_payload_length = length;
 
-	return 0;
+	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
+	return svc_rdma_send_write_chunk(rdma, rctxt->rc_write_list,
+					 &rqstp->rq_res, offset, length);
 }
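
For reference, svc_rdma_read_payload() is expected to be bound to the
new method in the RDMA transport's ops table, roughly as sketched below
(other methods omitted; the surrounding table is defined elsewhere in
the series):

    /* Sketch: binding the read-payload hook in the RDMA transport's
     * svc_xprt_ops (remaining xpo_* methods unchanged and omitted).
     */
    static const struct svc_xprt_ops svc_rdma_ops = {
            .xpo_sendto       = svc_rdma_sendto,
            .xpo_read_payload = svc_rdma_read_payload,
            /* ... */
    };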