
[rdma-next,04/10] RDMA/rtrs: Enable Relaxed Ordering

Message ID: 20210405052404.213889-5-leon@kernel.org
State: Awaiting Upstream
Delegated to: Netdev Maintainers
Series: Enable relaxed ordering for ULPs

Checks

Context: netdev/tree_selection
Check: success
Description: Guessing tree name failed - patch did not apply

Commit Message

Leon Romanovsky April 5, 2021, 5:23 a.m. UTC
From: Avihai Horon <avihaih@nvidia.com>

Enable Relaxed Ordering for the rtrs client and server.

Relaxed Ordering is an optional access flag and, as such, is silently
ignored by devices that don't support it.
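
For illustration, a minimal sketch of what opting in looks like with this
series applied (ib_alloc_mr() here uses the access-flags argument introduced
earlier in the series; "pd" and "max_pages" are placeholder names):

	/*
	 * The flag is purely a hint: HCAs that do not support relaxed
	 * ordering ignore it, so the caller needs no capability check.
	 */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_pages,
			 IB_ACCESS_RELAXED_ORDERING);
	if (IS_ERR(mr))
		return PTR_ERR(mr);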

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/infiniband/ulp/rtrs/rtrs-clt.c |  6 ++++--
 drivers/infiniband/ulp/rtrs/rtrs-srv.c | 15 ++++++++-------
 2 files changed, 12 insertions(+), 9 deletions(-)

Patch

diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 0d3960ed5b2b..a3fbb47a3574 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1099,7 +1099,8 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
 			.mr = req->mr,
 			.key = req->mr->rkey,
 			.access = (IB_ACCESS_LOCAL_WRITE |
-				   IB_ACCESS_REMOTE_WRITE),
+				   IB_ACCESS_REMOTE_WRITE |
+				   IB_ACCESS_RELAXED_ORDERING),
 		};
 		wr = &rwr.wr;
 
@@ -1260,7 +1261,8 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
 			goto out;
 
 		req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
-				      sess->max_pages_per_mr, 0);
+				      sess->max_pages_per_mr,
+				      IB_ACCESS_RELAXED_ORDERING);
 		if (IS_ERR(req->mr)) {
 			err = PTR_ERR(req->mr);
 			req->mr = NULL;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 575f31ff20fd..c28ed5e2245d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -312,8 +312,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
 		rwr.mr = srv_mr->mr;
 		rwr.wr.send_flags = 0;
 		rwr.key = srv_mr->mr->rkey;
-		rwr.access = (IB_ACCESS_LOCAL_WRITE |
-			      IB_ACCESS_REMOTE_WRITE);
+		rwr.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+			      IB_ACCESS_RELAXED_ORDERING);
 		msg = srv_mr->iu->buf;
 		msg->buf_id = cpu_to_le16(id->msg_id);
 		msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
@@ -432,8 +432,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
 		rwr.wr.send_flags = 0;
 		rwr.mr = srv_mr->mr;
 		rwr.key = srv_mr->mr->rkey;
-		rwr.access = (IB_ACCESS_LOCAL_WRITE |
-			      IB_ACCESS_REMOTE_WRITE);
+		rwr.access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+			      IB_ACCESS_RELAXED_ORDERING);
 		msg = srv_mr->iu->buf;
 		msg->buf_id = cpu_to_le16(id->msg_id);
 		msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
@@ -638,7 +638,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
 			goto free_sg;
 		}
 		mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
-				 sgt->nents, 0);
+				 sgt->nents, IB_ACCESS_RELAXED_ORDERING);
 		if (IS_ERR(mr)) {
 			err = PTR_ERR(mr);
 			goto unmap_sg;
@@ -823,8 +823,9 @@ static int process_info_req(struct rtrs_srv_con *con,
 		rwr[mri].wr.send_flags = 0;
 		rwr[mri].mr = mr;
 		rwr[mri].key = mr->rkey;
-		rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
-				   IB_ACCESS_REMOTE_WRITE);
+		rwr[mri].access =
+			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+			 IB_ACCESS_RELAXED_ORDERING);
 		reg_wr = &rwr[mri].wr;
 	}
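
For reference, the registration work request that the hunks above modify,
as one consolidated sketch (field layout per struct ib_reg_wr in
include/rdma/ib_verbs.h; "mr" stands for an MR allocated with the
relaxed-ordering flag, and the ib_post_send() plumbing is elided):

	struct ib_reg_wr rwr = {
		.wr = {
			.opcode     = IB_WR_REG_MR,
			.send_flags = 0,
		},
		.mr     = mr,
		.key    = mr->rkey,
		/* Same combination used on both client and server side. */
		.access = IB_ACCESS_LOCAL_WRITE |
			  IB_ACCESS_REMOTE_WRITE |
			  IB_ACCESS_RELAXED_ORDERING,
	};
	/* Chained into the send queue ahead of the RDMA operation. */
	struct ib_send_wr *reg_wr = &rwr.wr;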