diff mbox series

[for-next,3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp

Message ID 20230109195402.1339737-4-yanjun.zhu@intel.com (mailing list archive)
State Superseded
Headers show
Series RDMA/irdma: Refactor irdma_reg_user_mr function | expand

Commit Message

Zhu Yanjun Jan. 9, 2023, 7:54 p.m. UTC
From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the source code related to QP handling into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
 1 file changed, 34 insertions(+), 14 deletions(-)

Comments

Shiraz Saleem Jan. 10, 2023, 4:10 a.m. UTC | #1
> Subject: [PATCH for-next 3/4] RDMA/irdma: Split QP handler into
> irdma_reg_user_mr_type_qp
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the source codes related with QP handling into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
>  1 file changed, 34 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index 287d4313f14d..e90eba73c396 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2831,6 +2831,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
>  	kfree(iwmr);
>  }
> 
> +static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
> +				     struct irdma_device *iwdev,
> +				     struct ib_udata *udata,
> +				     struct irdma_mr *iwmr)

You could omit iwdev and compute it from iwmr.

> +{
> +	u32 total;
> +	int err = 0;

No need to initialize.

> +	u8 shadow_pgcnt = 1;

> +	bool use_pbles = false;


No need to initialize use_pbles.


> +	unsigned long flags;
> +	struct irdma_ucontext *ucontext;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	total = req.sq_pages + req.rq_pages;
> +	use_pbles = (total > 2);
> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
> +
> +	return err;
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2886,23 +2919,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,
> 
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
> -		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -		total = req.sq_pages + req.rq_pages;
> -		use_pbles = (total > 2);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
>  		if (err)
>  			goto error;
> 
> -		ucontext = rdma_udata_to_drv_context(udata, struct
> irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
>  		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
> IRDMA_FEATURE_CQ_RESIZE)
> --
> 2.31.1
Zhu Yanjun Jan. 11, 2023, 6:11 a.m. UTC | #2
On 2023/1/10 12:10, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 3/4] RDMA/irdma: Split QP handler into
>> irdma_reg_user_mr_type_qp
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> Split the source codes related with QP handling into a new function.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>   drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
>>   1 file changed, 34 insertions(+), 14 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index 287d4313f14d..e90eba73c396 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2831,6 +2831,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
>>   	kfree(iwmr);
>>   }
>>
>> +static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>> +				     struct irdma_device *iwdev,
>> +				     struct ib_udata *udata,
>> +				     struct irdma_mr *iwmr)
> You could omit iwdev and compute it from iwmr.


Got it.


>
>> +{
>> +	u32 total;
>> +	int err = 0;
> No need to initialize.
Got it.
>
>> +	u8 shadow_pgcnt = 1;
>> +	bool use_pbles = false;
>
> No need to initialize use_pbles.

Thanks. I will send out the latest commits very soon.

Zhu Yanjun

>
>
>> +	unsigned long flags;
>> +	struct irdma_ucontext *ucontext;
>> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> +
>> +	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
>> +	if (total > iwmr->page_cnt)
>> +		return -EINVAL;
>> +
>> +	total = req.sq_pages + req.rq_pages;
>> +	use_pbles = (total > 2);
>> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +	if (err)
>> +		return err;
>> +
>> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> +					     ibucontext);
>> +	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
>> +	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
>> +	iwpbl->on_list = true;
>> +	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>> +
>> +	return err;
>> +}
>> +
>>   /**
>>    * irdma_reg_user_mr - Register a user memory region
>>    * @pd: ptr of pd
>> @@ -2886,23 +2919,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,
>>
>>   	switch (req.reg_type) {
>>   	case IRDMA_MEMREG_TYPE_QP:
>> -		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
>> -		if (total > iwmr->page_cnt) {
>> -			err = -EINVAL;
>> -			goto error;
>> -		}
>> -		total = req.sq_pages + req.rq_pages;
>> -		use_pbles = (total > 2);
>> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
>>   		if (err)
>>   			goto error;
>>
>> -		ucontext = rdma_udata_to_drv_context(udata, struct
>> irdma_ucontext,
>> -						     ibucontext);
>> -		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
>> -		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
>> -		iwpbl->on_list = true;
>> -		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_CQ:
>>   		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
>> IRDMA_FEATURE_CQ_RESIZE)
>> --
>> 2.31.1
diff mbox series

Patch

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 287d4313f14d..e90eba73c396 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2831,6 +2831,39 @@  static void irdma_free_iwmr(struct irdma_mr *iwmr)
 	kfree(iwmr);
 }
 
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+				     struct irdma_device *iwdev,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	u32 total;
+	int err = 0;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles = false;
+	unsigned long flags;
+	struct irdma_ucontext *ucontext;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+
+	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	total = req.sq_pages + req.rq_pages;
+	use_pbles = (total > 2);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2886,23 +2919,10 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
-		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-		total = req.sq_pages + req.rq_pages;
-		use_pbles = (total > 2);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
 		if (err)
 			goto error;
 
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)