From patchwork Sun Dec 3 16:03:37 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Yishai Hadas
X-Patchwork-Id: 10089327
From: Yishai Hadas
To: linux-rdma@vger.kernel.org
Cc: yishaih@mellanox.com, maorg@mellanox.com, majd@mellanox.com
Subject: [PATCH V1 rdma-core 3/3] mlx5: Report tunneling offloads capabilities
Date: Sun, 3 Dec 2017 18:03:37 +0200
Message-Id: <1512317017-6223-4-git-send-email-yishaih@mellanox.com>
X-Mailer: git-send-email 1.8.2.3
In-Reply-To: <1512317017-6223-1-git-send-email-yishaih@mellanox.com>
References: <1512317017-6223-1-git-send-email-yishaih@mellanox.com>
X-Mailing-List: linux-rdma@vger.kernel.org

From: Maor Gottlieb

Report which tunneling types (VXLAN, GRE and GENEVE) support tunneling
offloads.

Signed-off-by: Maor Gottlieb
Reviewed-by: Yishai Hadas
---
 providers/mlx5/man/mlx5dv_query_device.3 | 15 ++++++++++++++-
 providers/mlx5/mlx5-abi.h                |  2 ++
 providers/mlx5/mlx5.c                    |  5 +++++
 providers/mlx5/mlx5.h                    |  1 +
 providers/mlx5/mlx5dv.h                  | 10 +++++++++-
 providers/mlx5/verbs.c                   |  1 +
 6 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/providers/mlx5/man/mlx5dv_query_device.3 b/providers/mlx5/man/mlx5dv_query_device.3
index f7cfdc0..f4b8104 100644
--- a/providers/mlx5/man/mlx5dv_query_device.3
+++ b/providers/mlx5/man/mlx5dv_query_device.3
@@ -47,6 +47,7 @@ uint64_t flags;
 uint64_t comp_mask; /* Use enum mlx5dv_context_comp_mask */
 struct mlx5dv_cqe_comp_caps cqe_comp_caps;
 struct mlx5dv_sw_parsing_caps sw_parsing_caps;
+uint32_t tunnel_offloads_caps;
 .in -8
 };
@@ -71,7 +72,8 @@ enum mlx5dv_context_comp_mask {
 MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
 MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
 MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
-MLX5DV_CONTEXT_MASK_RESERVED = 1 << 3,
+MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
+MLX5DV_CONTEXT_MASK_RESERVED = 1 << 4,
 .in -8
 };
@@ -84,6 +86,17 @@ MLX5DV_SW_PARSING_CSUM = 1 << 1,
 MLX5DV_SW_PARSING_LSO = 1 << 2,
 .in -8
 };
+
+.PP
+.nf
+enum mlx5dv_tunnel_offloads {
+.in +8
+MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
+MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
+MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
+.in -8
+};
+
 .fi
 .SH "RETURN VALUE"
 0 on success or the value of errno on failure (which indicates the failure reason).
diff --git a/providers/mlx5/mlx5-abi.h b/providers/mlx5/mlx5-abi.h
index b0b6704..afc9e6f 100644
--- a/providers/mlx5/mlx5-abi.h
+++ b/providers/mlx5/mlx5-abi.h
@@ -312,6 +312,8 @@ struct mlx5_query_device_ex_resp {
         __u32 flags; /* Use enum mlx5_query_dev_resp_flags */
         struct mlx5dv_sw_parsing_caps sw_parsing_caps;
         struct mlx5_striding_rq_caps striding_rq_caps;
+        __u32 tunnel_offloads_caps;
+        __u32 reserved;
 };

 #endif /* MLX5_ABI_H */
diff --git a/providers/mlx5/mlx5.c b/providers/mlx5/mlx5.c
index 36b47d7..2a29983 100644
--- a/providers/mlx5/mlx5.c
+++ b/providers/mlx5/mlx5.c
@@ -646,6 +646,11 @@ int mlx5dv_query_device(struct ibv_context *ctx_in,
                 comp_mask_out |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
         }

+        if (attrs_out->comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+                attrs_out->tunnel_offloads_caps = mctx->tunnel_offloads_caps;
+                comp_mask_out |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+        }
+
         attrs_out->comp_mask = comp_mask_out;

         return 0;
diff --git a/providers/mlx5/mlx5.h b/providers/mlx5/mlx5.h
index 7c85ab6..b9f10f4 100644
--- a/providers/mlx5/mlx5.h
+++ b/providers/mlx5/mlx5.h
@@ -293,6 +293,7 @@ struct mlx5_context {
         struct mlx5dv_ctx_allocators extern_alloc;
         struct mlx5dv_sw_parsing_caps sw_parsing_caps;
         struct mlx5dv_striding_rq_caps striding_rq_caps;
+        uint32_t tunnel_offloads_caps;
 };

 struct mlx5_bitmap {
diff --git a/providers/mlx5/mlx5dv.h b/providers/mlx5/mlx5dv.h
index 95a1697..0e78c41 100644
--- a/providers/mlx5/mlx5dv.h
+++ b/providers/mlx5/mlx5dv.h
@@ -61,7 +61,8 @@ enum mlx5dv_context_comp_mask {
         MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
         MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
         MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
-        MLX5DV_CONTEXT_MASK_RESERVED = 1 << 3,
+        MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
+        MLX5DV_CONTEXT_MASK_RESERVED = 1 << 4,
 };

 struct mlx5dv_cqe_comp_caps {
@@ -82,6 +83,12 @@ struct mlx5dv_striding_rq_caps {
         uint32_t supported_qpts;
 };

+enum mlx5dv_tunnel_offloads {
+        MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
+        MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
+        MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
+};
+
 /*
  * Direct verbs device-specific attributes
  */
@@ -92,6 +99,7 @@ struct mlx5dv_context {
         struct mlx5dv_cqe_comp_caps cqe_comp_caps;
         struct mlx5dv_sw_parsing_caps sw_parsing_caps;
         struct mlx5dv_striding_rq_caps striding_rq_caps;
+        uint32_t tunnel_offloads_caps;
 };

 enum mlx5dv_context_flags {
diff --git a/providers/mlx5/verbs.c b/providers/mlx5/verbs.c
index 7d36434..a3323fd 100644
--- a/providers/mlx5/verbs.c
+++ b/providers/mlx5/verbs.c
@@ -2214,6 +2214,7 @@ int mlx5_query_device_ex(struct ibv_context *context,
         mctx->cqe_comp_caps = resp.cqe_comp_caps;
         mctx->sw_parsing_caps = resp.sw_parsing_caps;
         mctx->striding_rq_caps = resp.striding_rq_caps.caps;
+        mctx->tunnel_offloads_caps = resp.tunnel_offloads_caps;
         if (resp.flags & MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP)
                 mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_CQE_128B_COMP;
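
For reference, a minimal usage sketch (not part of this patch): an application
requests the new field by setting the MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS bit
in comp_mask before calling mlx5dv_query_device(), and checks the returned
comp_mask to see whether the provider filled tunnel_offloads_caps. It assumes
an mlx5 device context already opened with ibv_open_device() and headers from
an rdma-core build that carries this series; the function name is only for
illustration.

#include <stdio.h>
#include <infiniband/mlx5dv.h>

static void print_tunnel_offloads_caps(struct ibv_context *ctx)
{
        struct mlx5dv_context attrs = {};

        /* Ask the provider to fill the tunnel offloads capabilities. */
        attrs.comp_mask = MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;

        if (mlx5dv_query_device(ctx, &attrs)) {
                fprintf(stderr, "mlx5dv_query_device failed\n");
                return;
        }

        /* The provider echoes the bit back only when the field is valid. */
        if (!(attrs.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS)) {
                printf("tunnel offloads caps not reported\n");
                return;
        }

        if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN)
                printf("VXLAN tunneling offloads supported\n");
        if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)
                printf("GRE tunneling offloads supported\n");
        if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE)
                printf("GENEVE tunneling offloads supported\n");
}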