
[v2,2/6] xprtrdma: Simplify rpcrdma_convert_kvec() and frwr_map()

Message ID 161236944071.1030487.460353530274045763.stgit@manet.1015granger.net (mailing list archive)
State Superseded
Series RPC/RDMA client fixes

Commit Message

Chuck Lever III Feb. 3, 2021, 4:24 p.m. UTC
Clean up.

Remove a conditional branch from the SGL set-up loop in frwr_map():
Instead of using either sg_set_page() or sg_set_buf(), initialize
the mr_page field properly when rpcrdma_convert_kvec() converts the
kvec to an SGL entry. frwr_map() can then invoke sg_set_page()
unconditionally.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/frwr_ops.c  |   12 ++++--------
 net/sunrpc/xprtrdma/rpc_rdma.c  |    2 +-
 net/sunrpc/xprtrdma/xprt_rdma.h |    9 +++++----
 3 files changed, 10 insertions(+), 13 deletions(-)
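
For context on why dropping the branch is safe for kvec-backed segments: sg_set_buf() is itself a thin wrapper around sg_set_page(), built from the same virt_to_page()/offset_in_page() decomposition that rpcrdma_convert_kvec() now performs up front. A paraphrased sketch of the helper as it appears in include/linux/scatterlist.h around this kernel version (not part of this patch):

static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
			      unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
	/* buf must live in the kernel linear map for virt_to_page() to be valid */
	BUG_ON(!virt_addr_valid(buf));
#endif
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}

Storing virt_to_page(vec->iov_base) in seg->mr_page at conversion time therefore lets frwr_map() take the sg_set_page() path for every segment without changing what ends up in the scatterlist.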

Comments

Tom Talpey Feb. 3, 2021, 6:07 p.m. UTC | #1
Reviewed-By: Tom Talpey <tom@talpey.com>


Patch

diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index baca49fe83af..13a50f77dddb 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -306,14 +306,10 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 	if (nsegs > ep->re_max_fr_depth)
 		nsegs = ep->re_max_fr_depth;
 	for (i = 0; i < nsegs;) {
-		if (seg->mr_page)
-			sg_set_page(&mr->mr_sg[i],
-				    seg->mr_page,
-				    seg->mr_len,
-				    offset_in_page(seg->mr_offset));
-		else
-			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
-				   seg->mr_len);
+		sg_set_page(&mr->mr_sg[i],
+			    seg->mr_page,
+			    seg->mr_len,
+			    offset_in_page(seg->mr_offset));
 
 		++seg;
 		++i;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 832765f3ebba..529adb6ad4db 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -214,7 +214,7 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
 		     unsigned int *n)
 {
 	if (vec->iov_len) {
-		seg->mr_page = NULL;
+		seg->mr_page = virt_to_page(vec->iov_base);
 		seg->mr_offset = vec->iov_base;
 		seg->mr_len = vec->iov_len;
 		++seg;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 94b28657aeeb..02971e183989 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -283,10 +283,11 @@ enum {
 				  RPCRDMA_MAX_IOV_SEGS,
 };
 
-struct rpcrdma_mr_seg {		/* chunk descriptors */
-	u32		mr_len;		/* length of chunk or segment */
-	struct page	*mr_page;	/* owning page, if any */
-	char		*mr_offset;	/* kva if no page, else offset */
+/* Arguments for DMA mapping and registration */
+struct rpcrdma_mr_seg {
+	u32		mr_len;		/* length of segment */
+	struct page	*mr_page;	/* underlying struct page */
+	char		*mr_offset;	/* IN: page offset, OUT: iova */
 };
 
 /* The Send SGE array is provisioned to send a maximum size
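
One detail that makes the frwr_map() side work: for kvec-backed segments, seg->mr_offset still carries the full kernel virtual address rather than a bare page offset, but offset_in_page() only looks at the low-order bits, so either form yields the same intra-page offset. Paraphrased from include/linux/mm.h (not part of this patch):

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

The unconditional sg_set_page(&mr->mr_sg[i], seg->mr_page, seg->mr_len, offset_in_page(seg->mr_offset)) therefore describes exactly the same memory that the old sg_set_buf() call did.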