
[for-next,4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq

Message ID 20230109195402.1339737-5-yanjun.zhu@intel.com (mailing list archive)
State Superseded
Series RDMA/irdma: Refactor irdma_reg_user_mr function

Commit Message

Zhu Yanjun Jan. 9, 2023, 7:54 p.m. UTC
From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the code related to CQ handling out of irdma_reg_user_mr() into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
 1 file changed, 35 insertions(+), 25 deletions(-)

Comments

Shiraz Saleem Jan. 10, 2023, 4:12 a.m. UTC | #1
> Subject: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into
> irdma_reg_user_mr_type_cq
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the code related to CQ handling out of irdma_reg_user_mr() into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
>  1 file changed, 35 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index e90eba73c396..b4befbafb830 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2864,6 +2864,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>  	return err;
>  }
> 
> +static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
> +				     struct irdma_mr *iwmr,
> +				     struct ib_udata *udata,
> +				     struct irdma_mem_reg_req req)

I would keep the order of these API args the same as for irdma_reg_user_mr_type_qp().
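
For reference, a sketch of the prototype with that reordering (only illustrating the suggestion, not code from this series), mirroring irdma_reg_user_mr_type_qp()'s req, iwdev, udata, iwmr order:

static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
				     struct irdma_device *iwdev,
				     struct ib_udata *udata,
				     struct irdma_mr *iwmr);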

> +{
> +	int err = 0;

No need to initialize.

> +	u8 shadow_pgcnt = 1;
> +	bool use_pbles = false;

No need to initialize use_pbles.
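
Taking the two initializer comments together, the declarations could then look something like this (just a sketch of the suggested cleanup, not code from this series):

	int err;		/* first set by the irdma_handle_q_mem() call */
	u8 shadow_pgcnt = 1;
	bool use_pbles;		/* first set from req.cq_pages */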

> +	struct irdma_ucontext *ucontext;
> +	unsigned long flags;
> +	u32 total;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> +		shadow_pgcnt = 0;
> +	total = req.cq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	use_pbles = (req.cq_pages > 1);
> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
> +
> +	return err;
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2879,15 +2913,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  {
>  #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>  	struct irdma_device *iwdev = to_iwdev(pd->device);
> -	struct irdma_ucontext *ucontext;
> -	struct irdma_pbl *iwpbl;
>  	struct irdma_mr *iwmr;
>  	struct ib_umem *region;
>  	struct irdma_mem_reg_req req;
> -	u32 total;
> -	u8 shadow_pgcnt = 1;
> -	bool use_pbles = false;
> -	unsigned long flags;
>  	int err = -EINVAL;

Do we need to initialize err here either? That is probably separate from this patch, but it could be cleaned up.

> 
>  	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
> @@ -2915,8 +2943,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  		return (struct ib_mr *)iwmr;
>  	}
> 
> -	iwpbl = &iwmr->iwpbl;
> -
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
>  		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
> @@ -2925,25 +2951,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
> 
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
> -		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> -			shadow_pgcnt = 0;
> -		total = req.cq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -
> -		use_pbles = (req.cq_pages > 1);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
>  		if (err)
>  			goto error;
> -
> -		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_MEM:
>  		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
> --
> 2.31.1
Zhu Yanjun Jan. 11, 2023, 6:23 a.m. UTC | #2
On 2023/1/10 12:12, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into
>> irdma_reg_user_mr_type_cq
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> Split the code related to CQ handling out of irdma_reg_user_mr() into a new function.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>   drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
>>   1 file changed, 35 insertions(+), 25 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index e90eba73c396..b4befbafb830 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2864,6 +2864,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>>   	return err;
>>   }
>>
>> +static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
>> +				     struct irdma_mr *iwmr,
>> +				     struct ib_udata *udata,
>> +				     struct irdma_mem_reg_req req)
> I would keep the order of these API args the same as for irdma_reg_user_mr_type_qp().
Got it.
>
>> +{
>> +	int err = 0;
> No need to initialize.
Got it.
>
>> +	u8 shadow_pgcnt = 1;
>> +	bool use_pbles = false;
> No need to initialize use_pbles.
Got it.
>
>> +	struct irdma_ucontext *ucontext;
>> +	unsigned long flags;
>> +	u32 total;
>> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> +
>> +	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> +		shadow_pgcnt = 0;
>> +	total = req.cq_pages + shadow_pgcnt;
>> +	if (total > iwmr->page_cnt)
>> +		return -EINVAL;
>> +
>> +	use_pbles = (req.cq_pages > 1);
>> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +	if (err)
>> +		return err;
>> +
>> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> +					     ibucontext);
>> +	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> +	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> +	iwpbl->on_list = true;
>> +	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>> +
>> +	return err;
>> +}
>> +
>>   /**
>>    * irdma_reg_user_mr - Register a user memory region
>>    * @pd: ptr of pd
>> @@ -2879,15 +2913,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>>  {
>>  #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>>   	struct irdma_device *iwdev = to_iwdev(pd->device);
>> -	struct irdma_ucontext *ucontext;
>> -	struct irdma_pbl *iwpbl;
>>   	struct irdma_mr *iwmr;
>>   	struct ib_umem *region;
>>   	struct irdma_mem_reg_req req;
>> -	u32 total;
>> -	u8 shadow_pgcnt = 1;
>> -	bool use_pbles = false;
>> -	unsigned long flags;
>>   	int err = -EINVAL;
> Do we need to initialize err here either? That is probably separate from this patch, but it could be cleaned up.

Yes. In the current code, the default branch of the switch statement does not assign any error code to err, which is why err still needs the initializer here.

In the latest revision, err is assigned -EINVAL in the default branch, and err is no longer initialized at its declaration.
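
Roughly, the shape being described for the next revision is the following (only a sketch of the idea, keeping the helper argument order as posted here; not the actual code):

	int err;	/* no initializer needed any more */
	/* ... other declarations and checks unchanged ... */

	switch (req.reg_type) {
	case IRDMA_MEMREG_TYPE_QP:
		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
		if (err)
			goto error;
		break;
	case IRDMA_MEMREG_TYPE_CQ:
		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
		if (err)
			goto error;
		break;
	case IRDMA_MEMREG_TYPE_MEM:
		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
		if (err)
			goto error;
		break;
	default:
		err = -EINVAL;	/* the default branch now sets err as well */
		goto error;
	}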

I will send out the latest commits very soon.

Zhu Yanjun

>
>>   	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>> @@ -2915,8 +2943,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>>   		return (struct ib_mr *)iwmr;
>>   	}
>>
>> -	iwpbl = &iwmr->iwpbl;
>> -
>>   	switch (req.reg_type) {
>>   	case IRDMA_MEMREG_TYPE_QP:
>>  		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
>> @@ -2925,25 +2951,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>>
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_CQ:
>> -		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> -			shadow_pgcnt = 0;
>> -		total = req.cq_pages + shadow_pgcnt;
>> -		if (total > iwmr->page_cnt) {
>> -			err = -EINVAL;
>> -			goto error;
>> -		}
>> -
>> -		use_pbles = (req.cq_pages > 1);
>> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
>>   		if (err)
>>   			goto error;
>> -
>> -		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> -						     ibucontext);
>> -		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> -		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> -		iwpbl->on_list = true;
>> -		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_MEM:
>>   		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
>> --
>> 2.31.1

Patch

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index e90eba73c396..b4befbafb830 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2864,6 +2864,40 @@  static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
 	return err;
 }
 
+static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
+				     struct irdma_mr *iwmr,
+				     struct ib_udata *udata,
+				     struct irdma_mem_reg_req req)
+{
+	int err = 0;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles = false;
+	struct irdma_ucontext *ucontext;
+	unsigned long flags;
+	u32 total;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+
+	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+		shadow_pgcnt = 0;
+	total = req.cq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	use_pbles = (req.cq_pages > 1);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2879,15 +2913,9 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 {
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
-	struct irdma_ucontext *ucontext;
-	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total;
-	u8 shadow_pgcnt = 1;
-	bool use_pbles = false;
-	unsigned long flags;
 	int err = -EINVAL;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
@@ -2915,8 +2943,6 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return (struct ib_mr *)iwmr;
 	}
 
-	iwpbl = &iwmr->iwpbl;
-
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
@@ -2925,25 +2951,9 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
-		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-			shadow_pgcnt = 0;
-		total = req.cq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-
-		use_pbles = (req.cq_pages > 1);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
 		if (err)
 			goto error;
-
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
 		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);