diff mbox series

[v2] RDMA/hns: fix return value in hns_roce_map_mr_sg

Message ID 20240411033851.2884771-1-shaozhengchao@huawei.com (mailing list archive)
State Accepted
Headers show
Series [v2] RDMA/hns: fix return value in hns_roce_map_mr_sg | expand

Commit Message

shaozhengchao April 11, 2024, 3:38 a.m. UTC
As described in the ib_map_mr_sg function comment, it returns the number
of sg elements that were mapped to the memory region. However,
hns_roce_map_mr_sg returns the number of pages required for mapping the
DMA area. Fix it.

Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process")
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
---
v2: fix the return value and coding format issues
---
 drivers/infiniband/hw/hns/hns_roce_mr.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

Comments

Junxian Huang April 11, 2024, 5:59 a.m. UTC | #1
On 2024/4/11 11:38, Zhengchao Shao wrote:
> As described in the ib_map_mr_sg function comment, it returns the number
> of sg elements that were mapped to the memory region. However,
> hns_roce_map_mr_sg returns the number of pages required for mapping the
> DMA area. Fix it.
> 
> Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process")
> Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>

Thanks,

Reviewed-by: Junxian Huang <huangjunxian6@hisilicon.com>

> ---
> v2: fix the return value and coding format issues
> ---
>  drivers/infiniband/hw/hns/hns_roce_mr.c | 15 +++++++--------
>  1 file changed, 7 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
> index 9e05b57a2d67..80c050d7d0ea 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_mr.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
> @@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
>  	struct ib_device *ibdev = &hr_dev->ib_dev;
>  	struct hns_roce_mr *mr = to_hr_mr(ibmr);
>  	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
> -	int ret = 0;
> +	int ret, sg_num = 0;
>  
>  	mr->npages = 0;
>  	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
>  				 sizeof(dma_addr_t), GFP_KERNEL);
>  	if (!mr->page_list)
> -		return ret;
> +		return sg_num;
>  
> -	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
> -	if (ret < 1) {
> +	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
> +	if (sg_num < 1) {
>  		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
> -			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
> +			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
>  		goto err_page_list;
>  	}
>  
> @@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
>  	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
>  	if (ret) {
>  		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
> -		ret = 0;
> +		sg_num = 0;
>  	} else {
>  		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
> -		ret = mr->npages;
>  	}
>  
>  err_page_list:
>  	kvfree(mr->page_list);
>  	mr->page_list = NULL;
>  
> -	return ret;
> +	return sg_num;
>  }
>  
>  static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
Zhu Yanjun April 11, 2024, 6:08 a.m. UTC | #2
在 2024/4/11 5:38, Zhengchao Shao 写道:
> As described in the ib_map_mr_sg function comment, it returns the number
> of sg elements that were mapped to the memory region. However,
> hns_roce_map_mr_sg returns the number of pages required for mapping the
> DMA area. Fix it.
> 
> Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process")
> Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
> ---
> v2: fix the return value and coding format issues
> ---
>   drivers/infiniband/hw/hns/hns_roce_mr.c | 15 +++++++--------
>   1 file changed, 7 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
> index 9e05b57a2d67..80c050d7d0ea 100644
> --- a/drivers/infiniband/hw/hns/hns_roce_mr.c
> +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
> @@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
>   	struct ib_device *ibdev = &hr_dev->ib_dev;
>   	struct hns_roce_mr *mr = to_hr_mr(ibmr);
>   	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
> -	int ret = 0;
> +	int ret, sg_num = 0;
>   
>   	mr->npages = 0;
>   	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
>   				 sizeof(dma_addr_t), GFP_KERNEL);
>   	if (!mr->page_list)
> -		return ret;
> +		return sg_num;
>   
> -	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
> -	if (ret < 1) {
> +	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
> +	if (sg_num < 1) {
>   		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
> -			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
> +			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
>   		goto err_page_list;
>   	}
>   
> @@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
>   	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
>   	if (ret) {
>   		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
> -		ret = 0;
> +		sg_num = 0;
>   	} else {
>   		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
> -		ret = mr->npages;
>   	}

In the above, can we replace the local variable ret with sg_num? So the 
local variable ret can be removed.
A trivial problem.

@@ -433,7 +433,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct 
scatterlist *sg, int sg_nents,
         struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_mr *mr = to_hr_mr(ibmr);
         struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-       int ret, sg_num = 0;
+       int sg_num = 0;

         mr->npages = 0;
         mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
@@ -452,9 +452,9 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct 
scatterlist *sg, int sg_nents,
         mtr->hem_cfg.region[0].count = mr->npages;
         mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
         mtr->hem_cfg.region_count = 1;
-       ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
-       if (ret) {
-               ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+       sg_num = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+       if (sg_num) {
+               ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", 
sg_num);
                 sg_num = 0;
         } else {
                 mr->pbl_mtr.hem_cfg.buf_pg_shift = 
(u32)ilog2(ibmr->page_size);

Zhu Yanjun

>   
>   err_page_list:
>   	kvfree(mr->page_list);
>   	mr->page_list = NULL;
>   
> -	return ret;
> +	return sg_num;
>   }
>   
>   static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
shaozhengchao April 11, 2024, 6:26 a.m. UTC | #3
On 2024/4/11 14:08, Zhu Yanjun wrote:
> 在 2024/4/11 5:38, Zhengchao Shao 写道:
>> As described in the ib_map_mr_sg function comment, it returns the number
>> of sg elements that were mapped to the memory region. However,
>> hns_roce_map_mr_sg returns the number of pages required for mapping the
>> DMA area. Fix it.
>>
>> Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation process")
>> Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
>> ---
>> v2: fix the return value and coding format issues
>> ---
>>   drivers/infiniband/hw/hns/hns_roce_mr.c | 15 +++++++--------
>>   1 file changed, 7 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c 
>> b/drivers/infiniband/hw/hns/hns_roce_mr.c
>> index 9e05b57a2d67..80c050d7d0ea 100644
>> --- a/drivers/infiniband/hw/hns/hns_roce_mr.c
>> +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
>> @@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, 
>> struct scatterlist *sg, int sg_nents,
>>       struct ib_device *ibdev = &hr_dev->ib_dev;
>>       struct hns_roce_mr *mr = to_hr_mr(ibmr);
>>       struct hns_roce_mtr *mtr = &mr->pbl_mtr;
>> -    int ret = 0;
>> +    int ret, sg_num = 0;
>>       mr->npages = 0;
>>       mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
>>                    sizeof(dma_addr_t), GFP_KERNEL);
>>       if (!mr->page_list)
>> -        return ret;
>> +        return sg_num;
>> -    ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 
>> hns_roce_set_page);
>> -    if (ret < 1) {
>> +    sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 
>> hns_roce_set_page);
>> +    if (sg_num < 1) {
>>           ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
>> -              mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
>> +              mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
>>           goto err_page_list;
>>       }
>> @@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, 
>> struct scatterlist *sg, int sg_nents,
>>       ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
>>       if (ret) {
>>           ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
>> -        ret = 0;
>> +        sg_num = 0;
>>       } else {
>>           mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
>> -        ret = mr->npages;
>>       }
> 
Hi Yanjun:
   Thank you for your review. The hns_roce_mtr_map function indicates
whether the page is successfully mapped. If sg_num is used, there may be
ambiguity. Is there something I missed?

Zhengchao Shao
> In the above, can we replace the local variable ret with sg_num? So the 
> local variable ret can be removed.
> A trivial problem.
> 
> @@ -433,7 +433,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct 
> scatterlist *sg, int sg_nents,
>          struct ib_device *ibdev = &hr_dev->ib_dev;
>          struct hns_roce_mr *mr = to_hr_mr(ibmr);
>          struct hns_roce_mtr *mtr = &mr->pbl_mtr;
> -       int ret, sg_num = 0;
> +       int sg_num = 0;
> 
>          mr->npages = 0;
>          mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
> @@ -452,9 +452,9 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct 
> scatterlist *sg, int sg_nents,
>          mtr->hem_cfg.region[0].count = mr->npages;
>          mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
>          mtr->hem_cfg.region_count = 1;
> -       ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
> -       if (ret) {
> -               ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
> +       sg_num = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
> +       if (sg_num) {
> +               ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", 
> sg_num);
>                  sg_num = 0;
>          } else {
>                  mr->pbl_mtr.hem_cfg.buf_pg_shift = 
> (u32)ilog2(ibmr->page_size);
> 
> Zhu Yanjun
> 
>>   err_page_list:
>>       kvfree(mr->page_list);
>>       mr->page_list = NULL;
>> -    return ret;
>> +    return sg_num;
>>   }
>>   static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
>
Zhu Yanjun April 11, 2024, 7:05 a.m. UTC | #4
在 2024/4/11 8:26, shaozhengchao 写道:
>
>
> On 2024/4/11 14:08, Zhu Yanjun wrote:
>> 在 2024/4/11 5:38, Zhengchao Shao 写道:
>>> As described in the ib_map_mr_sg function comment, it returns the 
>>> number
>>> of sg elements that were mapped to the memory region. However,
>>> hns_roce_map_mr_sg returns the number of pages required for mapping the
>>> DMA area. Fix it.
>>>
>>> Fixes: 9b2cf76c9f05 ("RDMA/hns: Optimize PBL buffer allocation 
>>> process")
>>> Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
>>> ---
>>> v2: fix the return value and coding format issues
>>> ---
>>>   drivers/infiniband/hw/hns/hns_roce_mr.c | 15 +++++++--------
>>>   1 file changed, 7 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c 
>>> b/drivers/infiniband/hw/hns/hns_roce_mr.c
>>> index 9e05b57a2d67..80c050d7d0ea 100644
>>> --- a/drivers/infiniband/hw/hns/hns_roce_mr.c
>>> +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
>>> @@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, 
>>> struct scatterlist *sg, int sg_nents,
>>>       struct ib_device *ibdev = &hr_dev->ib_dev;
>>>       struct hns_roce_mr *mr = to_hr_mr(ibmr);
>>>       struct hns_roce_mtr *mtr = &mr->pbl_mtr;
>>> -    int ret = 0;
>>> +    int ret, sg_num = 0;
>>>       mr->npages = 0;
>>>       mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
>>>                    sizeof(dma_addr_t), GFP_KERNEL);
>>>       if (!mr->page_list)
>>> -        return ret;
>>> +        return sg_num;
>>> -    ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 
>>> hns_roce_set_page);
>>> -    if (ret < 1) {
>>> +    sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, 
>>> hns_roce_set_page);
>>> +    if (sg_num < 1) {
>>>           ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = 
>>> %d.\n",
>>> -              mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
>>> +              mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
>>>           goto err_page_list;
>>>       }
>>> @@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, 
>>> struct scatterlist *sg, int sg_nents,
>>>       ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
>>>       if (ret) {
>>>           ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
>>> -        ret = 0;
>>> +        sg_num = 0;
>>>       } else {
>>>           mr->pbl_mtr.hem_cfg.buf_pg_shift = 
>>> (u32)ilog2(ibmr->page_size);
>>> -        ret = mr->npages;
>>>       }
>>
> Hi Yanjun:
>   Thank you for your review. The hns_roce_mtr_map function indicates
> whether the page is successfully mapped. If sg_num is used, there may be
> ambiguity. Is there something I missed?

Sure. From my perspective, I just want to remove a local variable. Your 
consideration also makes sense.

Zhu Yanjun

>
> Zhengchao Shao
>> In the above, can we replace the local variable ret with sg_num? So 
>> the local variable ret can be removed.
>> A trivial problem.
>>
>> @@ -433,7 +433,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct 
>> scatterlist *sg, int sg_nents,
>>          struct ib_device *ibdev = &hr_dev->ib_dev;
>>          struct hns_roce_mr *mr = to_hr_mr(ibmr);
>>          struct hns_roce_mtr *mtr = &mr->pbl_mtr;
>> -       int ret, sg_num = 0;
>> +       int sg_num = 0;
>>
>>          mr->npages = 0;
>>          mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
>> @@ -452,9 +452,9 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct 
>> scatterlist *sg, int sg_nents,
>>          mtr->hem_cfg.region[0].count = mr->npages;
>>          mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
>>          mtr->hem_cfg.region_count = 1;
>> -       ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
>> -       if (ret) {
>> -               ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", 
>> ret);
>> +       sg_num = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, 
>> mr->npages);
>> +       if (sg_num) {
>> +               ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", 
>> sg_num);
>>                  sg_num = 0;
>>          } else {
>>                  mr->pbl_mtr.hem_cfg.buf_pg_shift = 
>> (u32)ilog2(ibmr->page_size);
>>
>> Zhu Yanjun
>>
>>>   err_page_list:
>>>       kvfree(mr->page_list);
>>>       mr->page_list = NULL;
>>> -    return ret;
>>> +    return sg_num;
>>>   }
>>>   static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
>>
Leon Romanovsky April 16, 2024, 11:59 a.m. UTC | #5
On Thu, 11 Apr 2024 11:38:51 +0800, Zhengchao Shao wrote:
> As described in the ib_map_mr_sg function comment, it returns the number
> of sg elements that were mapped to the memory region. However,
> hns_roce_map_mr_sg returns the number of pages required for mapping the
> DMA area. Fix it.
> 
> 

Applied, thanks!

[1/1] RDMA/hns: fix return value in hns_roce_map_mr_sg
      https://git.kernel.org/rdma/rdma/c/203b70fda63425

Best regards,
diff mbox series

Patch

diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 9e05b57a2d67..80c050d7d0ea 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -441,18 +441,18 @@  int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-	int ret = 0;
+	int ret, sg_num = 0;
 
 	mr->npages = 0;
 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
 				 sizeof(dma_addr_t), GFP_KERNEL);
 	if (!mr->page_list)
-		return ret;
+		return sg_num;
 
-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
-	if (ret < 1) {
+	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+	if (sg_num < 1) {
 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
 		goto err_page_list;
 	}
 
@@ -463,17 +463,16 @@  int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
 	if (ret) {
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
-		ret = 0;
+		sg_num = 0;
 	} else {
 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
-		ret = mr->npages;
 	}
 
 err_page_list:
 	kvfree(mr->page_list);
 	mr->page_list = NULL;
 
-	return ret;
+	return sg_num;
 }
 
 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,