
[PATCHv2,for-next,4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq

Message ID 20230112000617.1659337-5-yanjun.zhu@intel.com (mailing list archive)
State Superseded
Series RDMA/irdma: Refactor irdma_reg_user_mr function

Commit Message

Zhu Yanjun Jan. 12, 2023, 12:06 a.m. UTC
From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the source code related to CQ handling into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 63 +++++++++++++++++------------
 1 file changed, 37 insertions(+), 26 deletions(-)

Comments

Leon Romanovsky Jan. 15, 2023, 11:28 a.m. UTC | #1
On Wed, Jan 11, 2023 at 07:06:17PM -0500, Zhu Yanjun wrote:
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the source code related to CQ handling into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 63 +++++++++++++++++------------
>  1 file changed, 37 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index 74dd1972c325..3902c74d59f2 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>  	return err;
>  }
>  
> +static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
> +				     struct ib_udata *udata,
> +				     struct irdma_mr *iwmr)
> +{
> +	int err;
> +	u8 shadow_pgcnt = 1;
> +	bool use_pbles;
> +	struct irdma_ucontext *ucontext;
> +	unsigned long flags;
> +	u32 total;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);

It would be nice to see more structured variable initialization.

I'm not going to insist on it, but IMHO the netdev reverse Christmas
tree rule looks more appealing than this random list.
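
Roughly, that rule sorts the local declarations from the longest line to
the shortest, so the block above would become something like this (a
sketch only, not the actual follow-up patch):

	/* locals ordered longest to shortest, per the netdev style */
	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_ucontext *ucontext;
	u8 shadow_pgcnt = 1;
	unsigned long flags;
	bool use_pbles;
	u32 total;
	int err;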

> +
> +	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> +		shadow_pgcnt = 0;
> +	total = req.cq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	use_pbles = (req.cq_pages > 1);
> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
> +
> +	return err;

return 0;

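In other words, err is guaranteed to be zero once irdma_handle_q_mem()
has succeeded and the list update is done, so the tail of the helper
could end like this (a sketch of the suggested change, not the applied
patch):

	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);

	/* err can only be 0 here; return the literal for clarity */
	return 0;
}
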
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  {
>  #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>  	struct irdma_device *iwdev = to_iwdev(pd->device);
> -	struct irdma_ucontext *ucontext;
> -	struct irdma_pbl *iwpbl;
>  	struct irdma_mr *iwmr;
>  	struct ib_umem *region;
>  	struct irdma_mem_reg_req req;
> -	u32 total;
> -	u8 shadow_pgcnt = 1;
> -	bool use_pbles = false;
> -	unsigned long flags;
> -	int err = -EINVAL;
> +	int err;
>  
>  	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>  		return ERR_PTR(-EINVAL);
> @@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  		return (struct ib_mr *)iwmr;
>  	}
>  
> -	iwpbl = &iwmr->iwpbl;
> -
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
>  		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
> @@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
> -		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> -			shadow_pgcnt = 0;
> -		total = req.cq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -
> -		use_pbles = (req.cq_pages > 1);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
>  		if (err)
>  			goto error;
> -
> -		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_MEM:
>  		err = irdma_reg_user_mr_type_mem(iwmr, access);
> @@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  
>  		break;
>  	default:
> +		err = -EINVAL;
>  		goto error;
>  	}
>  
> -- 
> 2.31.1
>
Zhu Yanjun Jan. 16, 2023, 3:03 a.m. UTC | #2
January 15, 2023 7:28 PM, "Leon Romanovsky" <leon@kernel.org> wrote:

> On Wed, Jan 11, 2023 at 07:06:17PM -0500, Zhu Yanjun wrote:
> 
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>> 
>> Split the source code related to CQ handling into a new function.
>> 
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>> drivers/infiniband/hw/irdma/verbs.c | 63 +++++++++++++++++------------
>> 1 file changed, 37 insertions(+), 26 deletions(-)
>> 
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index 74dd1972c325..3902c74d59f2 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>> return err;
>> }
>> 
>> +static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
>> + struct ib_udata *udata,
>> + struct irdma_mr *iwmr)
>> +{
>> + int err;
>> + u8 shadow_pgcnt = 1;
>> + bool use_pbles;
>> + struct irdma_ucontext *ucontext;
>> + unsigned long flags;
>> + u32 total;
>> + struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> + struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
> 
> It would be nice to see more structured variable initialization.
> 
> I'm not going to insist on it, but IMHO the netdev reverse Christmas
> tree rule looks more appealing than this random list.

Got it. The variable initializations are now structured,
and the netdev reverse Christmas tree rule is applied in the updated commits.

> 
>> +
>> + if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> + shadow_pgcnt = 0;
>> + total = req.cq_pages + shadow_pgcnt;
>> + if (total > iwmr->page_cnt)
>> + return -EINVAL;
>> +
>> + use_pbles = (req.cq_pages > 1);
>> + err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> + if (err)
>> + return err;
>> +
>> + ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> + ibucontext);
>> + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> + list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> + iwpbl->on_list = true;
>> + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>> +
>> + return err;
> 
> return 0;

I will send out the latest commits very soon.

Zhu Yanjun

> 
>> +}
>> +
>> /**
>> * irdma_reg_user_mr - Register a user memory region
>> * @pd: ptr of pd
>> @@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> {
>> #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>> struct irdma_device *iwdev = to_iwdev(pd->device);
>> - struct irdma_ucontext *ucontext;
>> - struct irdma_pbl *iwpbl;
>> struct irdma_mr *iwmr;
>> struct ib_umem *region;
>> struct irdma_mem_reg_req req;
>> - u32 total;
>> - u8 shadow_pgcnt = 1;
>> - bool use_pbles = false;
>> - unsigned long flags;
>> - int err = -EINVAL;
>> + int err;
>> 
>> if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>> return ERR_PTR(-EINVAL);
>> @@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> return (struct ib_mr *)iwmr;
>> }
>> 
>> - iwpbl = &iwmr->iwpbl;
>> -
>> switch (req.reg_type) {
>> case IRDMA_MEMREG_TYPE_QP:
>> err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
>> @@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> 
>> break;
>> case IRDMA_MEMREG_TYPE_CQ:
>> - if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> - shadow_pgcnt = 0;
>> - total = req.cq_pages + shadow_pgcnt;
>> - if (total > iwmr->page_cnt) {
>> - err = -EINVAL;
>> - goto error;
>> - }
>> -
>> - use_pbles = (req.cq_pages > 1);
>> - err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> + err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
>> if (err)
>> goto error;
>> -
>> - ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> - ibucontext);
>> - spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> - list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> - iwpbl->on_list = true;
>> - spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>> break;
>> case IRDMA_MEMREG_TYPE_MEM:
>> err = irdma_reg_user_mr_type_mem(iwmr, access);
>> @@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> 
>> break;
>> default:
>> + err = -EINVAL;
>> goto error;
>> }
>> 
>> --
>> 2.31.1

Patch

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 74dd1972c325..3902c74d59f2 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2867,6 +2867,40 @@  static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
 	return err;
 }
 
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	int err;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles;
+	struct irdma_ucontext *ucontext;
+	unsigned long flags;
+	u32 total;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+
+	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+		shadow_pgcnt = 0;
+	total = req.cq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	use_pbles = (req.cq_pages > 1);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2882,16 +2916,10 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 {
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
-	struct irdma_ucontext *ucontext;
-	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total;
-	u8 shadow_pgcnt = 1;
-	bool use_pbles = false;
-	unsigned long flags;
-	int err = -EINVAL;
+	int err;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
@@ -2918,8 +2946,6 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return (struct ib_mr *)iwmr;
 	}
 
-	iwpbl = &iwmr->iwpbl;
-
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
@@ -2928,25 +2954,9 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
-		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-			shadow_pgcnt = 0;
-		total = req.cq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-
-		use_pbles = (req.cq_pages > 1);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
 		if (err)
 			goto error;
-
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
 		err = irdma_reg_user_mr_type_mem(iwmr, access);
@@ -2955,6 +2965,7 @@  static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	default:
+		err = -EINVAL;
 		goto error;
 	}