[for-next] RDMA/rxe: Cleanup init_send_wqe

Message ID 20210206002437.2756-1-rpearson@hpe.com (mailing list archive)
State Accepted
Delegated to: Jason Gunthorpe
Series: [for-next] RDMA/rxe: Cleanup init_send_wqe

Commit Message

Bob Pearson Feb. 6, 2021, 12:24 a.m. UTC
This patch changes the return type of init_send_wqe() in rxe_verbs.c to
void, since it always returns 0. It also separates out the code that
copies inline data into the send wqe as a new helper, copy_inline_data_to_wqe().

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_verbs.c | 42 ++++++++++++---------------
 1 file changed, 19 insertions(+), 23 deletions(-)
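
For reference, the pattern being factored out is a plain scatter-gather
copy: each SGE's buffer is appended in order to the WQE's contiguous
inline-data area. Below is a minimal standalone sketch of that pattern;
the struct here is a simplified stand-in, not the kernel's real
struct ib_sge or rxe WQE layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct ib_sge. */
struct sge {
	uint64_t addr;		/* user buffer address */
	uint32_t length;	/* bytes to copy */
};

/* Append each SGE's buffer to a contiguous destination buffer, the
 * same loop copy_inline_data_to_wqe() runs over wqe->dma.inline_data. */
static void copy_inline_data(uint8_t *dst, const struct sge *sge,
			     int num_sge)
{
	int i;

	for (i = 0; i < num_sge; i++, sge++) {
		memcpy(dst, (void *)(uintptr_t)sge->addr, sge->length);
		dst += sge->length;
	}
}

int main(void)
{
	char a[] = "hello ", b[] = "world";
	struct sge sg[2] = {
		{ (uintptr_t)a, sizeof(a) - 1 },
		{ (uintptr_t)b, sizeof(b) },	/* include the NUL */
	};
	uint8_t buf[32];

	copy_inline_data(buf, sg, 2);
	printf("%s\n", (char *)buf);	/* prints "hello world" */
	return 0;
}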

Comments

Zhu Yanjun Feb. 8, 2021, 3:08 a.m. UTC | #1
On Sat, Feb 6, 2021 at 8:25 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>
> This patch changes the return type of init_send_wqe() in rxe_verbs.c to
> void, since it always returns 0. It also separates out the code that
> copies inline data into the send wqe as a new helper, copy_inline_data_to_wqe().
>
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_verbs.c | 42 ++++++++++++---------------
>  1 file changed, 19 insertions(+), 23 deletions(-)
> [...]

I cloned https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git,
but this commit cannot be applied successfully.

Zhu Yanjun
Pearson, Robert B Feb. 8, 2021, 5:46 p.m. UTC | #2
Sorry for the confusion. There was a previous patch sent the same day that fixed some checkpatch warnings. It has to be applied first. There must be some way to indicate this type of dependency.

bob

-----Original Message-----
From: Zhu Yanjun <zyjzyj2000@gmail.com> 
Sent: Sunday, February 7, 2021 9:09 PM
To: Bob Pearson <rpearsonhpe@gmail.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>; RDMA mailing list <linux-rdma@vger.kernel.org>; Pearson, Robert B <robert.pearson2@hpe.com>
Subject: Re: [PATCH for-next] RDMA/rxe: Cleanup init_send_wqe

On Sat, Feb 6, 2021 at 8:25 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> [...]

I cloned https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git,
but this commit cannot be applied successfully.

Zhu Yanjun
Jason Gunthorpe Feb. 9, 2021, 12:41 a.m. UTC | #3
On Fri, Feb 05, 2021 at 06:24:37PM -0600, Bob Pearson wrote:
> This patch changes the return type of init_send_wqe() in rxe_verbs.c to
> void, since it always returns 0. It also separates out the code that
> copies inline data into the send wqe as a new helper, copy_inline_data_to_wqe().
> 
> Signed-off-by: Bob Pearson <rpearson@hpe.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_verbs.c | 42 ++++++++++++---------------
>  1 file changed, 19 insertions(+), 23 deletions(-)

Applied to for-next, thanks

Jason
Zhu Yanjun Feb. 9, 2021, 1:02 a.m. UTC | #4
On Tue, Feb 9, 2021 at 1:46 AM Pearson, Robert B
<robert.pearson2@hpe.com> wrote:
>
> Sorry for the confusion. There was a previous patch sent the same day that fixed some checkpatch warnings. It has to be applied first. There must be some way to indicate this type of dependency.

It would be better to make this patch and the previous patch a patch
series, so this problem will not occur again.

Thanks.

Zhu Yanjun
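
A sketch of what that would look like, assuming the checkpatch fixes and
this cleanup are the top two commits on the branch:

# Emit both patches as one numbered series with a cover letter, so
# that they are reviewed and applied together, in order.
git format-patch --cover-letter -2 HEAD

# Send the series (cover letter first) to the list.
git send-email --to=linux-rdma@vger.kernel.org 00*.patch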

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 984909e03b35..dee5e0e919d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -555,14 +555,24 @@  static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 	}
 }
 
-static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
+				    const struct ib_send_wr *ibwr)
+{
+	struct ib_sge *sge = ibwr->sg_list;
+	u8 *p = wqe->dma.inline_data;
+	int i;
+
+	for (i = 0; i < ibwr->num_sge; i++, sge++) {
+		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		p += sge->length;
+	}
+}
+
+static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 			 unsigned int mask, unsigned int length,
 			 struct rxe_send_wqe *wqe)
 {
 	int num_sge = ibwr->num_sge;
-	struct ib_sge *sge;
-	int i;
-	u8 *p;
 
 	init_send_wr(qp, &wqe->wr, ibwr);
 
@@ -570,7 +580,7 @@  static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	if (unlikely(mask & WR_REG_MASK)) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
-		return 0;
+		return;
 	}
 
 	if (qp_type(qp) == IB_QPT_UD ||
@@ -578,20 +588,11 @@  static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	    qp_type(qp) == IB_QPT_GSI)
 		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
 
-	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
-		p = wqe->dma.inline_data;
-
-		sge = ibwr->sg_list;
-		for (i = 0; i < num_sge; i++, sge++) {
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-					sge->length);
-
-			p += sge->length;
-		}
-	} else {
+	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
+		copy_inline_data_to_wqe(wqe, ibwr);
+	else
 		memcpy(wqe->dma.sge, ibwr->sg_list,
 		       num_sge * sizeof(struct ib_sge));
-	}
 
 	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
 		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
@@ -603,8 +604,6 @@  static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	wqe->dma.sge_offset	= 0;
 	wqe->state		= wqe_state_posted;
 	wqe->ssn		= atomic_add_return(1, &qp->ssn);
-
-	return 0;
 }
 
 static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -627,10 +626,7 @@  static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	}
 
 	send_wqe = producer_addr(sq->queue);
-
-	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
-	if (unlikely(err))
-		goto err1;
+	init_send_wqe(qp, ibwr, mask, length, send_wqe);
 
 	advance_producer(sq->queue);
 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);