[rdma-core,2/2] libhns: Bugfix for atomic operation in user mode

Message ID 1538301097-95641-3-git-send-email-oulijun@huawei.com
State Superseded
Series two misc fixes for libhns

Commit Message

Lijun Ou Sept. 30, 2018, 9:51 a.m. UTC
The atomic operation does not support inline data. Besides, the
standard atomic operation supports only one sge, and this sge is
placed directly in the wqe. This patch adjusts the code accordingly.

Fixes: d92b0f5 ("libhns: Add atomic support for hip08 user mode")
Signed-off-by: Lijun Ou <oulijun@huawei.com>
---
 providers/hns/hns_roce_u_hw_v2.c | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)
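
For context, here is a minimal sketch of the wqe layout this patch moves to
for atomic operations: a single data segment written at the wqe cursor,
immediately followed by the atomic segment, with IBV_SEND_INLINE never taken
for atomic opcodes. The struct and function names below are simplified
stand-ins for illustration only, not the provider's real types.

static void *demo_fill_atomic_wqe(void *wqe, uint64_t sge_addr,
				  uint32_t sge_len, uint32_t lkey,
				  uint64_t swap_or_add, uint64_t cmp)
{
	/* Stand-ins for hns_roce_v2_wqe_data_seg and the atomic segment. */
	struct demo_data_seg {
		uint32_t len;
		uint32_t lkey;
		uint64_t addr;
	} dseg = { .len = sge_len, .lkey = lkey, .addr = sge_addr };
	struct demo_atomic_seg {
		uint64_t fetchadd_swap_data;
		uint64_t cmp_data;
	} aseg = { .fetchadd_swap_data = swap_or_add, .cmp_data = cmp };

	/* Exactly one sge, placed in the wqe ... */
	memcpy(wqe, &dseg, sizeof(dseg));
	wqe = (char *)wqe + sizeof(dseg);
	/* ... followed directly by the atomic segment. */
	memcpy(wqe, &aseg, sizeof(aseg));
	return (char *)wqe + sizeof(aseg);
}

This mirrors the ordering the patch enforces in hns_roce_u_v2_post_send():
set_data_seg_v2() first, then set_atomic_seg(), with the inline branch only
reachable for non-atomic opcodes.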

Patch

diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index b155c3d..449b6cb 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -686,8 +686,6 @@  static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
 				rc_sq_wqe->rkey = htole32(wr->wr.atomic.rkey);
 				rc_sq_wqe->va =
 					htole64(wr->wr.atomic.remote_addr);
-				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
-				set_atomic_seg(wqe, wr);
 				break;
 
 			case IBV_WR_ATOMIC_FETCH_AND_ADD:
@@ -698,8 +696,6 @@  static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
 				rc_sq_wqe->rkey = htole32(wr->wr.atomic.rkey);
 				rc_sq_wqe->va =
 					htole64(wr->wr.atomic.remote_addr);
-				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
-				set_atomic_seg(wqe, wr);
 				break;
 			default:
 				roce_set_field(rc_sq_wqe->byte_4,
@@ -718,14 +714,13 @@  static int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
 			break;
 		}
 
+		dseg = wqe;
 		if (wr->opcode == IBV_WR_ATOMIC_FETCH_AND_ADD ||
-		    wr->opcode == IBV_WR_ATOMIC_CMP_AND_SWP)
-			dseg = wqe - sizeof(struct hns_roce_v2_wqe_data_seg);
-		else
-			dseg = wqe;
-
-		/* Inline */
-		if (wr->send_flags & IBV_SEND_INLINE && wr->num_sge) {
+		    wr->opcode == IBV_WR_ATOMIC_CMP_AND_SWP) {
+			set_data_seg_v2(dseg, wr->sg_list);
+			wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
+			set_atomic_seg(wqe, wr);
+		} else if (wr->send_flags & IBV_SEND_INLINE && wr->num_sge) {
 			if (le32toh(rc_sq_wqe->msg_len) > qp->max_inline_data) {
 				ret = EINVAL;
 				*bad_wr = wr;