
[for-next,v5,06/10] RDMA/rxe: Move local ops to subroutine

Message ID: 20210422161341.41929-7-rpearson@hpe.com (mailing list archive)
State: Superseded
Series: RDMA/rxe: Implement memory windows

Commit Message

Bob Pearson April 22, 2021, 4:13 p.m. UTC
Simplify rxe_requester() by moving the local operations
to a subroutine. Add an error return for an illegal send WR
opcode. Move next_index ahead of rxe_run_task; this fixes a
small bug where work completions were delayed until after the
next wqe, which was not the intended behavior.

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_req.c | 89 +++++++++++++++++------------
 1 file changed, 54 insertions(+), 35 deletions(-)
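
A minimal sketch of the ordering fix described in the commit message, as a
hypothetical helper (finish_local_wqe is an illustrative name, not part of
the patch; it mirrors the tail of the new do_local_ops and omits the error
paths):

static void finish_local_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;

	/* advance the SQ index before kicking the completer, so the
	 * completion for this wqe is not held back until the next wqe
	 * is processed
	 */
	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);

	/* then schedule the completion task for signaled work requests */
	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
		rxe_run_task(&qp->comp.task, 1);
}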

Comments

Zhu Yanjun April 25, 2021, 4:34 a.m. UTC | #1
On Fri, Apr 23, 2021 at 12:13 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>
> Simplify rxe_requester() by moving the local operations
> to a subroutine. Add an error return for illegal send WR opcode.
> Moved next_index ahead of rxe_run_task which fixed a small bug where
> work completions were delayed until after the next wqe which was not
> the intended behavior.
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_req.c | 89 +++++++++++++++++------------
>  1 file changed, 54 insertions(+), 35 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
> index 0d4dcd514c55..0cf97e3db29f 100644
> --- a/drivers/infiniband/sw/rxe/rxe_req.c
> +++ b/drivers/infiniband/sw/rxe/rxe_req.c
> @@ -555,6 +555,56 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
>                           jiffies + qp->qp_timeout_jiffies);
>  }
>
> +static int do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)

rxe_do_local_ops, if it is not used outside of softroce.

> +{
> +       u8 opcode = wqe->wr.opcode;
> +       struct rxe_dev *rxe;
> +       struct rxe_mr *mr;
> +       u32 rkey;
> +
> +       switch (opcode) {
> +       case IB_WR_LOCAL_INV:
> +               rxe = to_rdev(qp->ibqp.device);
> +               rkey = wqe->wr.ex.invalidate_rkey;
> +               mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
> +               if (!mr) {
> +                       pr_err("No MR for rkey %#x\n", rkey);
> +                       wqe->state = wqe_state_error;
> +                       wqe->status = IB_WC_LOC_QP_OP_ERR;
> +                       return -EINVAL;
> +               }
> +               mr->state = RXE_MR_STATE_FREE;
> +               rxe_drop_ref(mr);
> +               break;
> +       case IB_WR_REG_MR:
> +               mr = to_rmr(wqe->wr.wr.reg.mr);
> +
> +               rxe_add_ref(mr);
> +               mr->state = RXE_MR_STATE_VALID;
> +               mr->access = wqe->wr.wr.reg.access;
> +               mr->ibmr.lkey = wqe->wr.wr.reg.key;
> +               mr->ibmr.rkey = wqe->wr.wr.reg.key;
> +               mr->iova = wqe->wr.wr.reg.mr->iova;
> +               rxe_drop_ref(mr);
> +               break;
> +       default:
> +               pr_err("Unexpected send wqe opcode %d\n", opcode);
> +               wqe->state = wqe_state_error;
> +               wqe->status = IB_WC_LOC_QP_OP_ERR;
> +               return -EINVAL;
> +       }
> +
> +       wqe->state = wqe_state_done;
> +       wqe->status = IB_WC_SUCCESS;
> +       qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
> +
> +       if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
> +           qp->sq_sig_type == IB_SIGNAL_ALL_WR)
> +               rxe_run_task(&qp->comp.task, 1);
> +
> +       return 0;
> +}
> +
>  int rxe_requester(void *arg)
>  {
>         struct rxe_qp *qp = (struct rxe_qp *)arg;
> @@ -594,42 +644,11 @@ int rxe_requester(void *arg)
>                 goto exit;
>
>         if (wqe->mask & WR_LOCAL_OP_MASK) {
> -               if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
> -                       struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
> -                       struct rxe_mr *rmr;
> -
> -                       rmr = rxe_pool_get_index(&rxe->mr_pool,
> -                                                wqe->wr.ex.invalidate_rkey >> 8);
> -                       if (!rmr) {
> -                               pr_err("No mr for key %#x\n",
> -                                      wqe->wr.ex.invalidate_rkey);
> -                               wqe->state = wqe_state_error;
> -                               wqe->status = IB_WC_MW_BIND_ERR;
> -                               goto exit;
> -                       }
> -                       rmr->state = RXE_MR_STATE_FREE;
> -                       rxe_drop_ref(rmr);
> -                       wqe->state = wqe_state_done;
> -                       wqe->status = IB_WC_SUCCESS;
> -               } else if (wqe->wr.opcode == IB_WR_REG_MR) {
> -                       struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
> -
> -                       rmr->state = RXE_MR_STATE_VALID;
> -                       rmr->access = wqe->wr.wr.reg.access;
> -                       rmr->ibmr.lkey = wqe->wr.wr.reg.key;
> -                       rmr->ibmr.rkey = wqe->wr.wr.reg.key;
> -                       rmr->iova = wqe->wr.wr.reg.mr->iova;
> -                       wqe->state = wqe_state_done;
> -                       wqe->status = IB_WC_SUCCESS;
> -               } else {
> +               ret = do_local_ops(qp, wqe);
> +               if (unlikely(ret))
>                         goto exit;
> -               }
> -               if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
> -                   qp->sq_sig_type == IB_SIGNAL_ALL_WR)
> -                       rxe_run_task(&qp->comp.task, 1);
> -               qp->req.wqe_index = next_index(qp->sq.queue,
> -                                               qp->req.wqe_index);
> -               goto next_wqe;
> +               else
> +                       goto next_wqe;
>         }
>
>         if (unlikely(qp_type(qp) == IB_QPT_RC &&
> --
> 2.27.0
>
Bob Pearson April 28, 2021, 4:16 p.m. UTC | #2
On 4/24/2021 11:34 PM, Zhu Yanjun wrote:
> On Fri, Apr 23, 2021 at 12:13 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>> Simplify rxe_requester() by moving the local operations
>> to a subroutine. Add an error return for illegal send WR opcode.
>> Moved next_index ahead of rxe_run_task which fixed a small bug where
>> work completions were delayed until after the next wqe which was not
>> the intended behavior.
>>
>> Signed-off-by: Bob Pearson <rpearson@hpe.com>
>> ---
>>   drivers/infiniband/sw/rxe/rxe_req.c | 89 +++++++++++++++++------------
>>   1 file changed, 54 insertions(+), 35 deletions(-)
>>
>> diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
>> index 0d4dcd514c55..0cf97e3db29f 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_req.c
>> +++ b/drivers/infiniband/sw/rxe/rxe_req.c
>> @@ -555,6 +555,56 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
>>                            jiffies + qp->qp_timeout_jiffies);
>>   }
>>
>> +static int do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
> rxe_do_local_ops if not used out of softroce.


Same issue. I am trying to be consistent with the original style (which
mostly I wrote). Static names don't need to be elaborate.


Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0d4dcd514c55..0cf97e3db29f 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -555,6 +555,56 @@  static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 			  jiffies + qp->qp_timeout_jiffies);
 }
 
+static int do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	u8 opcode = wqe->wr.opcode;
+	struct rxe_dev *rxe;
+	struct rxe_mr *mr;
+	u32 rkey;
+
+	switch (opcode) {
+	case IB_WR_LOCAL_INV:
+		rxe = to_rdev(qp->ibqp.device);
+		rkey = wqe->wr.ex.invalidate_rkey;
+		mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
+		if (!mr) {
+			pr_err("No MR for rkey %#x\n", rkey);
+			wqe->state = wqe_state_error;
+			wqe->status = IB_WC_LOC_QP_OP_ERR;
+			return -EINVAL;
+		}
+		mr->state = RXE_MR_STATE_FREE;
+		rxe_drop_ref(mr);
+		break;
+	case IB_WR_REG_MR:
+		mr = to_rmr(wqe->wr.wr.reg.mr);
+
+		rxe_add_ref(mr);
+		mr->state = RXE_MR_STATE_VALID;
+		mr->access = wqe->wr.wr.reg.access;
+		mr->ibmr.lkey = wqe->wr.wr.reg.key;
+		mr->ibmr.rkey = wqe->wr.wr.reg.key;
+		mr->iova = wqe->wr.wr.reg.mr->iova;
+		rxe_drop_ref(mr);
+		break;
+	default:
+		pr_err("Unexpected send wqe opcode %d\n", opcode);
+		wqe->state = wqe_state_error;
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
+		return -EINVAL;
+	}
+
+	wqe->state = wqe_state_done;
+	wqe->status = IB_WC_SUCCESS;
+	qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+
+	if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+	    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+		rxe_run_task(&qp->comp.task, 1);
+
+	return 0;
+}
+
 int rxe_requester(void *arg)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -594,42 +644,11 @@  int rxe_requester(void *arg)
 		goto exit;
 
 	if (wqe->mask & WR_LOCAL_OP_MASK) {
-		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
-			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
-			struct rxe_mr *rmr;
-
-			rmr = rxe_pool_get_index(&rxe->mr_pool,
-						 wqe->wr.ex.invalidate_rkey >> 8);
-			if (!rmr) {
-				pr_err("No mr for key %#x\n",
-				       wqe->wr.ex.invalidate_rkey);
-				wqe->state = wqe_state_error;
-				wqe->status = IB_WC_MW_BIND_ERR;
-				goto exit;
-			}
-			rmr->state = RXE_MR_STATE_FREE;
-			rxe_drop_ref(rmr);
-			wqe->state = wqe_state_done;
-			wqe->status = IB_WC_SUCCESS;
-		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
-			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);
-
-			rmr->state = RXE_MR_STATE_VALID;
-			rmr->access = wqe->wr.wr.reg.access;
-			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
-			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
-			rmr->iova = wqe->wr.wr.reg.mr->iova;
-			wqe->state = wqe_state_done;
-			wqe->status = IB_WC_SUCCESS;
-		} else {
+		ret = do_local_ops(qp, wqe);
+		if (unlikely(ret))
 			goto exit;
-		}
-		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
-			rxe_run_task(&qp->comp.task, 1);
-		qp->req.wqe_index = next_index(qp->sq.queue,
-						qp->req.wqe_index);
-		goto next_wqe;
+		else
+			goto next_wqe;
 	}
 
 	if (unlikely(qp_type(qp) == IB_QPT_RC &&