From patchwork Thu Feb 19 22:02:43 2015
X-Patchwork-Submitter: Somnath Kotur
X-Patchwork-Id: 5850441
From: Somnath Kotur
To: linux-rdma@vger.kernel.org
Cc: Moni Shoua, Somnath Kotur
Subject: [PATCH 28/30] IB/mlx4: Enable send of RoCE QP1 packets with IP/UDP headers
Date: Fri, 20 Feb 2015 03:32:43 +0530
Message-ID: <86aec158-d18c-4cf5-bb33-6e822c94001c@CMEXHTCAS2.ad.emulex.com>
In-Reply-To: <1424383365-19337-1-git-send-email-somnath.kotur@emulex.com>
References: <1424383365-19337-1-git-send-email-somnath.kotur@emulex.com>
X-Mailer: git-send-email 1.7.9.5
X-Mailing-List: linux-rdma@vger.kernel.org

From: Moni Shoua

RoCEv2 packets are sent over IP/UDP. The mlx4 driver uses a RAW QP type to
send QP1 packets and therefore has to build the network headers below BTH in
software. This patch adds an option to build QP1 packets with IP and UDP
headers when RoCEv2 is requested.
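For context, the sketch below (plain illustrative C, not part of the patch; the
enum and helper names are made up for this mail) shows the header stack QP1 has
to build in software for each GID type, and why the Ethernet type and the
ip_version/is_udp arguments passed to ib_ud_header_init() follow from it:
RoCEv1 keeps the GRH directly on Ethernet (ethertype 0x8915), while RoCEv2
carries BTH inside UDP (destination port 4791) over IPv4 or IPv6.

#include <stdio.h>

/* Illustrative only (not kernel code): the QP1 header stacks this patch
 * distinguishes between.  Names are invented for this example.
 */
enum roce_gid_type { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6 };

static void print_qp1_header_stack(enum roce_gid_type t)
{
        switch (t) {
        case ROCE_V1:
                /* GRH sits directly on Ethernet, RoCE ethertype 0x8915 */
                printf("ETH(0x8915) | GRH | BTH | DETH | MAD payload\n");
                break;
        case ROCE_V2_IPV4:
                /* BTH is carried in UDP (dport 4791) over IPv4, ethertype 0x0800 */
                printf("ETH(0x0800) | IPv4 | UDP(4791) | BTH | DETH | MAD payload\n");
                break;
        case ROCE_V2_IPV6:
                /* BTH is carried in UDP (dport 4791) over IPv6, ethertype 0x86DD */
                printf("ETH(0x86DD) | IPv6 | UDP(4791) | BTH | DETH | MAD payload\n");
                break;
        }
}

int main(void)
{
        print_qp1_header_stack(ROCE_V1);
        print_qp1_header_stack(ROCE_V2_IPV4);
        print_qp1_header_stack(ROCE_V2_IPV6);
        return 0;
}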
Signed-off-by: Moni Shoua
Signed-off-by: Somnath Kotur
---
 drivers/infiniband/hw/mlx4/qp.c | 84 ++++++++++++++++++++++++---------------
 1 files changed, 52 insertions(+), 32 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f55f4d4..9996527 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -32,6 +32,8 @@
  */
 
 #include
+#include
+#include
 #include
 #include
@@ -2164,16 +2166,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
         return 0;
 }
 
-static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
-{
-        int i;
-
-        for (i = ETH_ALEN; i; i--) {
-                dst_mac[i - 1] = src_mac & 0xff;
-                src_mac >>= 8;
-        }
-}
-
+#define MLX4_ROCEV2_QP1_SPORT 0xC000
 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                             void *wqe, unsigned *mlx_seg_len)
 {
@@ -2193,6 +2186,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         bool is_eth;
         bool is_vlan = false;
         bool is_grh;
+        bool is_udp = false;
+        int ip_version = 0;
 
         send_size = 0;
         for (i = 0; i < wr->num_sge; ++i)
@@ -2201,6 +2196,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
         is_grh = mlx4_ib_ah_grh_present(ah);
         if (is_eth) {
+                struct ib_gid_attr gid_attr;
+
                 if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
                         /* When multi-function is enabled, the ib_core gid
                          * indexes don't necessarily match the hw ones, so
@@ -2211,21 +2208,29 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                         if (err)
                                 return err;
                 } else {
-                        err = ib_get_cached_gid(ib_dev,
+                        err = ib_get_cached_gid(sqp->qp.ibqp.device,
                                                 be32_to_cpu(ah->av.ib.port_pd) >> 24,
-                                                ah->av.ib.gid_index, &sgid,
-                                                NULL);
-                        if (err)
+                                                ah->av.ib.gid_index, &sgid, &gid_attr);
+                        if (!err) {
+                                is_udp = (gid_attr.gid_type == IB_GID_TYPE_ROCE_V2) ? true : false;
+                                if (is_udp) {
+                                        if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
+                                                ip_version = 4;
+                                        else
+                                                ip_version = 6;
+                                        is_grh = false;
+                                }
+                        } else {
                                 return err;
+                        }
                 }
-
                 if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
                         vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
                         is_vlan = 1;
                 }
         }
 
         err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
-                                0, 0, 0, &sqp->ud_header);
+                                ip_version, is_udp, 0, &sqp->ud_header);
         if (err)
                 return err;
@@ -2236,12 +2241,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                 sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
         }
 
-        if (is_grh) {
+        if (is_grh || (ip_version == 6)) {
                 sqp->ud_header.grh.traffic_class =
                         (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
                 sqp->ud_header.grh.flow_label =
                         ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
-                sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
+
+                sqp->ud_header.grh.hop_limit = (is_udp) ?
+                        IPV6_DEFAULT_HOPLIMIT : ah->av.ib.hop_limit;
                 if (is_eth)
                         memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
                 else {
@@ -2265,6 +2272,26 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                        ah->av.ib.dgid, 16);
         }
 
+        if (ip_version == 4) {
+                sqp->ud_header.ip4.tos =
+                        (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
+                sqp->ud_header.ip4.id = 0;
+                sqp->ud_header.ip4.frag_off = htons(IP_DF);
+                sqp->ud_header.ip4.ttl = (is_udp) ?
+                        IPV6_DEFAULT_HOPLIMIT : ah->av.eth.hop_limit;
+
+                memcpy(&sqp->ud_header.ip4.saddr,
+                       sgid.raw + 12, 4);
+                memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
+                sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
+        }
+
+        if (is_udp) {
+                sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
+                sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
+                sqp->ud_header.udp.csum = 0;
+        }
+
         mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
         if (!is_eth) {
@@ -2293,34 +2320,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         if (is_eth) {
                 struct in6_addr in6;
-
+                u16 ether_type;
                 u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
 
+                ether_type = (!is_udp) ? MLX4_IB_IBOE_ETHERTYPE :
+                        (ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
+
                 mlx->sched_prio = cpu_to_be16(pcp);
 
+                ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
                 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
-                /* FIXME: cache smac value? */
                 memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
                 memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
                 memcpy(&in6, sgid.raw, sizeof(in6));
 
-                if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
-                        u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
-                        u8 smac[ETH_ALEN];
-
-                        mlx4_u64_to_smac(smac, mac);
-                        memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
-                } else {
-                        /* use the src mac of the tunnel */
-                        memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
-                }
                 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                         mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
                 if (!is_vlan) {
-                        sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+                        sqp->ud_header.eth.type = cpu_to_be16(ether_type);
                 } else {
-                        sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
+                        sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
                         sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
                 }
         } else {
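A note on the IPv4 path above: when a RoCEv2 GID holds an IPv4 address it is
stored v4-mapped (::ffff:a.b.c.d), so the IPv4 source and destination addresses
are simply the last four bytes of the source and destination GIDs; that is what
the ipv6_addr_v4mapped() check and the memcpy(..., sgid.raw + 12, 4) /
memcpy(..., ah->av.ib.dgid + 12, 4) lines rely on. A minimal userspace sketch of
that check and extraction (illustrative only, the helper name is made up):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Decide the IP version for a RoCEv2 GID and, for v4-mapped GIDs,
 * recover the IPv4 address from the last 4 bytes (GID bytes 12..15).
 */
static int gid_ip_version(const struct in6_addr *gid, struct in_addr *v4out)
{
        if (IN6_IS_ADDR_V4MAPPED(gid)) {
                memcpy(v4out, (const unsigned char *)gid + 12, 4);
                return 4;
        }
        return 6;
}

int main(void)
{
        struct in6_addr gid;
        struct in_addr v4;
        char buf[INET_ADDRSTRLEN];

        /* ::ffff:192.0.2.1 is the v4-mapped form a RoCEv2 port would expose
         * for an IPv4 address configured on the underlying netdev. */
        inet_pton(AF_INET6, "::ffff:192.0.2.1", &gid);

        if (gid_ip_version(&gid, &v4) == 4)
                printf("IPv4 GID -> %s\n", inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
        else
                printf("IPv6 GID\n");
        return 0;
}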