
[v2,3/6] xprtrdma: Refactor invocations of offset_in_page()

Message ID 161236944700.1030487.6859398915626711523.stgit@manet.1015granger.net (mailing list archive)
State Superseded
Series RPC/RDMA client fixes

Commit Message

Chuck Lever Feb. 3, 2021, 4:24 p.m. UTC
Clean up so that offset_in_page() is invoked less often in the
most common case, which is mapping xdr->pages.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/frwr_ops.c  |    8 +++-----
 net/sunrpc/xprtrdma/rpc_rdma.c  |    4 ++--
 net/sunrpc/xprtrdma/xprt_rdma.h |    2 +-
 3 files changed, 6 insertions(+), 8 deletions(-)

Comments

Tom Talpey Feb. 3, 2021, 6:09 p.m. UTC | #1
This looks good, but the earlier 1/6 patch depends on the offset_in_page
conversion in rpcrdma_convert_kvec. Won't that complicate any bisection?

Reviewed-By: Tom Talpey <tom@talpey.com>

Chuck Lever Feb. 3, 2021, 6:11 p.m. UTC | #2
> On Feb 3, 2021, at 1:09 PM, Tom Talpey <tom@talpey.com> wrote:
> 
> This looks good, but the earlier 1/6 patch depends on the offset_in_page
> conversion in rpcrdma_convert_kvec.

I don't think it does... sg_set_buf() handles the offset_in_page() calculation
in that case.
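
A minimal sketch of why, assuming the usual sg_set_buf() definition in include/linux/scatterlist.h (debug checks omitted); the kvec path gets its page offset computed for it either way:

	static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
				      unsigned int buflen)
	{
		/* sg_set_buf() derives both the page and the in-page offset itself */
		sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
	}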


> Won't that complicate any bisection?
> 
> Reviewed-By: Tom Talpey <tom@talpey.com>
> 

--
Chuck Lever
Tom Talpey Feb. 3, 2021, 6:19 p.m. UTC | #3
On 2/3/2021 1:11 PM, Chuck Lever wrote:
> 
> 
>> On Feb 3, 2021, at 1:09 PM, Tom Talpey <tom@talpey.com> wrote:
>>
>> This looks good, but the earlier 1/6 patch depends on the offset_in_page
>> conversion in rpcrdma_convert_kvec.
> 
> I don't think it does... sg_set_buf() handles the offset_in_page() calculation
> in that case.

Ah, ok. And offset_in_page can be applied repeatedly, as well.

Tom.
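
A minimal illustration of that point, assuming the mask-based offset_in_page() definition: the result is always below PAGE_SIZE, so applying the mask a second time is a no-op.

	unsigned long off = offset_in_page(vec->iov_base);	/* 0 <= off < PAGE_SIZE */

	/* Masking an already-reduced offset cannot change it. */
	BUG_ON(offset_in_page(off) != off);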

Patch

diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 13a50f77dddb..766a1048a48a 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -306,16 +306,14 @@  struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 	if (nsegs > ep->re_max_fr_depth)
 		nsegs = ep->re_max_fr_depth;
 	for (i = 0; i < nsegs;) {
-		sg_set_page(&mr->mr_sg[i],
-			    seg->mr_page,
-			    seg->mr_len,
-			    offset_in_page(seg->mr_offset));
+		sg_set_page(&mr->mr_sg[i], seg->mr_page,
+			    seg->mr_len, seg->mr_offset);
 
 		++seg;
 		++i;
 		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
 			continue;
-		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
+		if ((i < nsegs && seg->mr_offset) ||
 		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
 			break;
 	}
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 529adb6ad4db..b3e66b8f65ab 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -215,7 +215,7 @@  rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
 {
 	if (vec->iov_len) {
 		seg->mr_page = virt_to_page(vec->iov_base);
-		seg->mr_offset = vec->iov_base;
+		seg->mr_offset = offset_in_page(vec->iov_base);
 		seg->mr_len = vec->iov_len;
 		++seg;
 		++(*n);
@@ -248,7 +248,7 @@  rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
 	page_base = offset_in_page(xdrbuf->page_base);
 	while (len) {
 		seg->mr_page = *ppages;
-		seg->mr_offset = (char *)page_base;
+		seg->mr_offset = page_base;
 		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
 		len -= seg->mr_len;
 		++ppages;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 02971e183989..ed1c5444fb9d 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -287,7 +287,7 @@  enum {
 struct rpcrdma_mr_seg {
 	u32		mr_len;		/* length of segment */
 	struct page	*mr_page;	/* underlying struct page */
-	char		*mr_offset;	/* IN: page offset, OUT: iova */
+	u64		mr_offset;	/* IN: page offset, OUT: iova */
 };
 
 /* The Send SGE array is provisioned to send a maximum size