From patchwork Thu Jun 29 11:47:06 2017
X-Patchwork-Submitter: Yishai Hadas <yishaih@mellanox.com>
X-Patchwork-Id: 9816497
From: Yishai Hadas <yishaih@mellanox.com>
To: linux-rdma@vger.kernel.org
Cc: dledford@redhat.com, yishaih@mellanox.com, maorg@mellanox.com,
	majd@mellanox.com
Subject: [PATCH rdma-core 5/7] mlx4: Convert to use predefined get CQE opcodes
Date: Thu, 29 Jun 2017 14:47:06 +0300
Message-Id: <1498736828-17875-6-git-send-email-yishaih@mellanox.com>
X-Mailer: git-send-email 1.8.2.3
In-Reply-To: <1498736828-17875-1-git-send-email-yishaih@mellanox.com>
References: <1498736828-17875-1-git-send-email-yishaih@mellanox.com>
X-Mailing-List: linux-rdma@vger.kernel.org

From: Maor Gottlieb <maorg@mellanox.com>

Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
---
 providers/mlx4/cq.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/providers/mlx4/cq.c b/providers/mlx4/cq.c
index 068e0d2..50adebb 100644
--- a/providers/mlx4/cq.c
+++ b/providers/mlx4/cq.c
@@ -115,7 +115,7 @@ static enum ibv_wc_status mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe)
 static inline void handle_good_req(struct ibv_wc *wc, struct mlx4_cqe *cqe)
 {
 	wc->wc_flags = 0;
-	switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+	switch (mlx4dv_get_cqe_opcode(cqe)) {
 	case MLX4_OPCODE_RDMA_WRITE_IMM:
 		wc->wc_flags |= IBV_WC_WITH_IMM;
 		SWITCH_FALLTHROUGH;
@@ -215,7 +215,7 @@ static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
 	wc->qp_num = qpn;
 
 	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
-	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+	is_error = (mlx4dv_get_cqe_opcode(cqe)) ==
 		MLX4_CQE_OPCODE_ERROR;
 
 	if ((qpn & MLX4_XRC_QPN_BIT) && !is_send) {
@@ -278,7 +278,7 @@ static inline int mlx4_parse_cqe(struct mlx4_cq *cq,
 	} else {
 		wc->byte_len = be32toh(cqe->byte_cnt);
 
-		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+		switch (mlx4dv_get_cqe_opcode(cqe)) {
 		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
 			wc->opcode   = IBV_WC_RECV_RDMA_WITH_IMM;
 			wc->wc_flags = IBV_WC_WITH_IMM;
@@ -460,7 +460,7 @@ static enum ibv_wc_opcode mlx4_cq_read_wc_opcode(struct ibv_cq_ex *ibcq)
 	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
 
 	if (cq->cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK) {
-		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+		switch (mlx4dv_get_cqe_opcode(cq->cqe)) {
 		case MLX4_OPCODE_RDMA_WRITE_IMM:
 		case MLX4_OPCODE_RDMA_WRITE:
 			return IBV_WC_RDMA_WRITE;
@@ -480,7 +480,7 @@ static enum ibv_wc_opcode mlx4_cq_read_wc_opcode(struct ibv_cq_ex *ibcq)
 			return IBV_WC_BIND_MW;
 		}
 	} else {
-		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+		switch (mlx4dv_get_cqe_opcode(cq->cqe)) {
 		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
 			return IBV_WC_RECV_RDMA_WITH_IMM;
 		case MLX4_RECV_OPCODE_SEND_INVAL:
@@ -507,7 +507,7 @@ static int mlx4_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
 	int wc_flags = 0;
 
 	if (is_send) {
-		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+		switch (mlx4dv_get_cqe_opcode(cq->cqe)) {
 		case MLX4_OPCODE_RDMA_WRITE_IMM:
 		case MLX4_OPCODE_SEND_IMM:
 			wc_flags |= IBV_WC_WITH_IMM;
@@ -520,7 +520,7 @@ static int mlx4_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
 			htobe32(MLX4_CQE_STATUS_IPV4_CSUM_OK)) <<
 			IBV_WC_IP_CSUM_OK_SHIFT;
 
-		switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+		switch (mlx4dv_get_cqe_opcode(cq->cqe)) {
 		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
 		case MLX4_RECV_OPCODE_SEND_IMM:
 			wc_flags |= IBV_WC_WITH_IMM;
@@ -554,7 +554,7 @@ static uint32_t mlx4_cq_read_wc_imm_data(struct ibv_cq_ex *ibcq)
 {
 	struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
 
-	switch (cq->cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
+	switch (mlx4dv_get_cqe_opcode(cq->cqe)) {
 	case MLX4_RECV_OPCODE_SEND_INVAL:
 		return be32toh(cq->cqe->immed_rss_invalid);
 	default:
@@ -756,7 +756,7 @@ void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
 	cqe = get_cqe(cq, (i & old_cqe));
 	cqe += cqe_inc;
 
-	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
+	while ((mlx4dv_get_cqe_opcode(cqe)) != MLX4_CQE_OPCODE_RESIZE) {
 		cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
 			(((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
 		memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * cq->cqe_size,
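[Note: the diff above is a mechanical substitution, so mlx4dv_get_cqe_opcode()
must reduce to the open-coded expression it replaces. What follows is a
minimal, self-contained sketch of that equivalence, not the patch itself: the
real helper and CQE definitions live in providers/mlx4/mlx4dv.h (presumably
introduced earlier in this series), and the struct below is trimmed to the one
field this patch touches.]

	#include <stdint.h>

	enum {
		MLX4_CQE_OWNER_MASK   = 0x80,	/* sw/hw ownership bit */
		MLX4_CQE_IS_SEND_MASK = 0x40,	/* send vs. receive completion */
		MLX4_CQE_OPCODE_MASK  = 0x1f,	/* low 5 bits carry the opcode */
	};

	struct mlx4_cqe {
		/* ... other CQE fields elided for the sketch ... */
		uint8_t owner_sr_opcode;	/* owner | is-send | opcode, packed */
	};

	/* Equivalent of the expression being replaced throughout cq.c */
	static inline uint8_t mlx4dv_get_cqe_opcode(struct mlx4_cqe *cqe)
	{
		return cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK;
	}

The practical gain is that the provider (and direct-verbs consumers) extract
the opcode through one named accessor instead of repeating the mask at every
call site, so a future change to the opcode encoding only has to touch the
helper.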