Message ID | 20180718162532.14633-20-bart.vanassche@wdc.com (mailing list archive) |
---|---|
State | Accepted |
Delegated to: | Jason Gunthorpe |
Headers | show |
> On Jul 18, 2018, at 12:25 PM, Bart Van Assche <Bart.VanAssche@wdc.com> wrote: > > Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL > as third argument to ib_post_(send|recv|srq_recv)(). > > Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com> > Cc: Chuck Lever <chuck.lever@oracle.com> I guess the theory is that since ib_post_{send,recv} are declared static inline, the compiler can see that the third argument is a constant NULL and optimize away the conditional "bad_recv_wr ? : &dummy" at each call site. Reviewed-by: Chuck Lever <chuck.lever@oracle.com> > --- > net/sunrpc/xprtrdma/fmr_ops.c | 4 +--- > net/sunrpc/xprtrdma/frwr_ops.c | 4 ++-- > net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 3 +-- > net/sunrpc/xprtrdma/svc_rdma_rw.c | 2 +- > net/sunrpc/xprtrdma/svc_rdma_sendto.c | 3 +-- > 5 files changed, 6 insertions(+), 10 deletions(-) > > diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c > index 17fb1e025654..0f7c465d9a5a 100644 > --- a/net/sunrpc/xprtrdma/fmr_ops.c > +++ b/net/sunrpc/xprtrdma/fmr_ops.c > @@ -279,9 +279,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, > static int > fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) > { > - struct ib_send_wr *bad_wr; > - > - return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr); > + return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL); > } > > /* Invalidate all memory regions that were registered for "req". 
> diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c > index c040de196e13..a167eebf63d5 100644 > --- a/net/sunrpc/xprtrdma/frwr_ops.c > +++ b/net/sunrpc/xprtrdma/frwr_ops.c > @@ -464,7 +464,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, > static int > frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) > { > - struct ib_send_wr *post_wr, *bad_wr; > + struct ib_send_wr *post_wr; > struct rpcrdma_mr *mr; > > post_wr = &req->rl_sendctx->sc_wr; > @@ -486,7 +486,7 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) > /* If ib_post_send fails, the next ->send_request for > * @req will queue these MWs for recovery. > */ > - return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr); > + return ib_post_send(ia->ri_id->qp, post_wr, NULL); > } > > /* Handle a remotely invalidated mr on the @mrs list > diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c > index 841fca143804..2ef75e885411 100644 > --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c > +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c > @@ -229,11 +229,10 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, > static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, > struct svc_rdma_recv_ctxt *ctxt) > { > - struct ib_recv_wr *bad_recv_wr; > int ret; > > svc_xprt_get(&rdma->sc_xprt); > - ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr); > + ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL); > trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret); > if (ret) > goto err_post; > diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c > index ce3ea8419704..80975427f523 100644 > --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c > +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c > @@ -329,7 +329,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) > do { > if (atomic_sub_return(cc->cc_sqecount, > &rdma->sc_sq_avail) > 0) { > - ret = ib_post_send(rdma->sc_qp, first_wr, 
&bad_wr); > + ret = ib_post_send(rdma->sc_qp, first_wr, NULL); > trace_svcrdma_post_rw(&cc->cc_cqe, > cc->cc_sqecount, ret); > if (ret) > diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c > index 4a3efaea277c..ffef0c508f1a 100644 > --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c > +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c > @@ -291,7 +291,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) > */ > int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) > { > - struct ib_send_wr *bad_wr; > int ret; > > might_sleep(); > @@ -311,7 +310,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) > } > > svc_xprt_get(&rdma->sc_xprt); > - ret = ib_post_send(rdma->sc_qp, wr, &bad_wr); > + ret = ib_post_send(rdma->sc_qp, wr, NULL); > trace_svcrdma_post_send(wr, ret); > if (ret) { > set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); > -- > 2.18.0 > > -- > To unsubscribe from this list: send the line "unsubscribe linux-rdma" in > the body of a message to majordomo@vger.kernel.org > More majordomo info at http://vger.kernel.org/majordomo-info.html -- Chuck Lever -- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
On Thu, 2018-07-19 at 09:43 -0400, Chuck Lever wrote: > > On Jul 18, 2018, at 12:25 PM, Bart Van Assche <Bart.VanAssche@wdc.com> wrote: > > > > Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL > > as third argument to ib_post_(send|recv|srq_recv)(). > > > > Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com> > > Cc: Chuck Lever <chuck.lever@oracle.com> > > I guess the theory is that since ib_post_{send,recv} are declared static inline, > the compiler can see that the third argument is a constant NULL and optimize > away the conditional "bad_recv_wr ? : &dummy" at each call site. > > Reviewed-by: Chuck Lever <chuck.lever@oracle.com> Yes, I'm indeed assuming that the compiler will optimize out the ternary expression because ib_post_{send,recv} are declared inline. Thanks for the review! Bart.

-- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index 17fb1e025654..0f7c465d9a5a 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -279,9 +279,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, static int fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) { - struct ib_send_wr *bad_wr; - - return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr); + return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL); } /* Invalidate all memory regions that were registered for "req". diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index c040de196e13..a167eebf63d5 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -464,7 +464,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, static int frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) { - struct ib_send_wr *post_wr, *bad_wr; + struct ib_send_wr *post_wr; struct rpcrdma_mr *mr; post_wr = &req->rl_sendctx->sc_wr; @@ -486,7 +486,7 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req) /* If ib_post_send fails, the next ->send_request for * @req will queue these MWs for recovery. 
*/ - return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr); + return ib_post_send(ia->ri_id->qp, post_wr, NULL); } /* Handle a remotely invalidated mr on the @mrs list diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 841fca143804..2ef75e885411 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -229,11 +229,10 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt) { - struct ib_recv_wr *bad_recv_wr; int ret; svc_xprt_get(&rdma->sc_xprt); - ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr); + ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL); trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret); if (ret) goto err_post; diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index ce3ea8419704..80975427f523 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -329,7 +329,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) do { if (atomic_sub_return(cc->cc_sqecount, &rdma->sc_sq_avail) > 0) { - ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr); + ret = ib_post_send(rdma->sc_qp, first_wr, NULL); trace_svcrdma_post_rw(&cc->cc_cqe, cc->cc_sqecount, ret); if (ret) diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 4a3efaea277c..ffef0c508f1a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -291,7 +291,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) */ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) { - struct ib_send_wr *bad_wr; int ret; might_sleep(); @@ -311,7 +310,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr) } svc_xprt_get(&rdma->sc_xprt); - ret = ib_post_send(rdma->sc_qp, wr, &bad_wr); + ret = 
ib_post_send(rdma->sc_qp, wr, NULL); trace_svcrdma_post_send(wr, ret); if (ret) { set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL as third argument to ib_post_(send|recv|srq_recv)(). Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com> Cc: Chuck Lever <chuck.lever@oracle.com> --- net/sunrpc/xprtrdma/fmr_ops.c | 4 +--- net/sunrpc/xprtrdma/frwr_ops.c | 4 ++-- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 3 +-- net/sunrpc/xprtrdma/svc_rdma_rw.c | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 3 +-- 5 files changed, 6 insertions(+), 10 deletions(-)