From patchwork Thu May 8 06:52:40 2014
From: Or Gerlitz <ogerlitz@mellanox.com>
To: yishaih@mellanox.com
Cc: linux-rdma@vger.kernel.org, roland@kernel.org, matanb@mellanox.com,
    dledford@redhat.com, Or Gerlitz <ogerlitz@mellanox.com>
Subject: [PATCH libmlx4 V2 2/2] Add ibv_query_port_ex support
Date: Thu, 8 May 2014 09:52:40 +0300
Message-Id: <1399531960-30738-3-git-send-email-ogerlitz@mellanox.com>
In-Reply-To: <1399531960-30738-1-git-send-email-ogerlitz@mellanox.com>
References: <1399531960-30738-1-git-send-email-ogerlitz@mellanox.com>

From: Matan Barak <matanb@mellanox.com>

This patch adds support for the new extended port query verb,
ibv_query_port_ex. Its purpose is to:

1. Request fields that are not available through today's ibv_query_port.
2. Avoid fetching fields the caller does not need, which leaves more
   room for optimization.
3. Cache the port's link layer type in mlx4_context. Caching allows us
   to avoid ibv_query_port calls and saves time in ibv_create_ah.
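For illustration, here is a minimal consumer sketch. It is a sketch only:
ibv_query_port_ex(), struct ibv_port_attr_ex and the IBV_QUERY_PORT_EX_*
masks are assumed to come from the companion libibverbs extension series
(they are not part of the stock libibverbs API); the helper name
print_link_layer is hypothetical.

    #include <stdio.h>
    #include <infiniband/verbs.h>	/* assumed to carry the _ex extensions */

    static int print_link_layer(struct ibv_context *ctx, uint8_t port_num)
    {
            struct ibv_port_attr_ex attr;
            int err;

            /* Ask only for the link layer and capability flags; with this
             * patch, mlx4 can serve repeat queries of these two fields from
             * port_query_cache without a kernel round trip. */
            attr.comp_mask = IBV_QUERY_PORT_EX_ATTR_MASK1;
            attr.mask1 = IBV_QUERY_PORT_EX_LINK_LAYER |
                         IBV_QUERY_PORT_EX_CAP_FLAGS;

            err = ibv_query_port_ex(ctx, port_num, &attr);
            if (err)
                    return err;

            printf("port %d link layer: %s\n", port_num,
                   attr.link_layer == IBV_LINK_LAYER_ETHERNET ?
                   "Ethernet" : "InfiniBand");
            return 0;
    }

After the first query of a port, mlx4 answers this narrow mask combination
from its cache; this is the same fast path mlx4_create_ah and
mlx4_create_ah_ex take in the hunks below.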
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
---
 src/mlx4.c  |  4 +++
 src/mlx4.h  |  9 +++++++
 src/verbs.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 80 insertions(+), 6 deletions(-)

diff --git a/src/mlx4.c b/src/mlx4.c
index 5943750..c33c94d 100644
--- a/src/mlx4.c
+++ b/src/mlx4.c
@@ -157,6 +157,8 @@ static int mlx4_init_context(struct verbs_device *v_device,
 	context->qp_table_shift = ffs(context->num_qps) - 1 - MLX4_QP_TABLE_BITS;
 	context->qp_table_mask = (1 << context->qp_table_shift) - 1;
 
+	for (i = 0; i < MLX4_PORTS_NUM; ++i)
+		context->port_query_cache[i].valid = 0;
 	pthread_mutex_init(&context->qp_table_mutex, NULL);
 	for (i = 0; i < MLX4_QP_TABLE_SIZE; ++i)
@@ -207,6 +209,8 @@ static int mlx4_init_context(struct verbs_device *v_device,
 	verbs_set_ctx_op(verbs_ctx, drv_ibv_create_flow, ibv_cmd_create_flow);
 	verbs_set_ctx_op(verbs_ctx, drv_ibv_destroy_flow, ibv_cmd_destroy_flow);
 	verbs_set_ctx_op(verbs_ctx, drv_ibv_create_ah_ex, mlx4_create_ah_ex);
+	verbs_set_ctx_op(verbs_ctx, drv_query_port_ex,
+			 mlx4_query_port_ex);
 
 	return 0;
diff --git a/src/mlx4.h b/src/mlx4.h
index 3015357..06fd2ba 100644
--- a/src/mlx4.h
+++ b/src/mlx4.h
@@ -40,6 +40,8 @@
 #include <infiniband/driver.h>
 #include <infiniband/arch.h>
 
+#define MLX4_PORTS_NUM 2
+
 #ifdef HAVE_VALGRIND_MEMCHECK_H
 #  include <valgrind/memcheck.h>
@@ -189,6 +191,11 @@ struct mlx4_context {
 	pthread_mutex_t			db_list_mutex;
 	int				cqe_size;
 	struct mlx4_xsrq_table		xsrq_table;
+	struct {
+		uint8_t			valid;
+		uint8_t			link_layer;
+		enum ibv_port_cap_flags	caps;
+	} port_query_cache[MLX4_PORTS_NUM];
 };
 
 struct mlx4_buf {
@@ -354,6 +361,8 @@ int mlx4_query_device(struct ibv_context *context,
 		      struct ibv_device_attr *attr);
 int mlx4_query_port(struct ibv_context *context, uint8_t port,
 		    struct ibv_port_attr *attr);
+int mlx4_query_port_ex(struct ibv_context *context, uint8_t port_num,
+		       struct ibv_port_attr_ex *port_attr);
 struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
 int mlx4_free_pd(struct ibv_pd *pd);
diff --git a/src/verbs.c b/src/verbs.c
index e322a34..b29f0a6 100644
--- a/src/verbs.c
+++ b/src/verbs.c
@@ -70,8 +70,63 @@ int mlx4_query_port(struct ibv_context *context, uint8_t port,
 		    struct ibv_port_attr *attr)
 {
 	struct ibv_query_port cmd;
+	int err;
+
+	err = ibv_cmd_query_port(context, port, attr, &cmd, sizeof(cmd));
+	if (!err && port <= MLX4_PORTS_NUM && port > 0) {
+		struct mlx4_context *mctx = to_mctx(context);
+		if (!mctx->port_query_cache[port - 1].valid) {
+			mctx->port_query_cache[port - 1].link_layer =
+				attr->link_layer;
+			mctx->port_query_cache[port - 1].caps =
+				attr->port_cap_flags;
+			mctx->port_query_cache[port - 1].valid = 1;
+		}
+	}
+
+	return err;
+}
+
+int mlx4_query_port_ex(struct ibv_context *context, uint8_t port_num,
+		       struct ibv_port_attr_ex *port_attr)
+{
+	/* Check that only valid flags were given */
+	if (!(port_attr->comp_mask & IBV_QUERY_PORT_EX_ATTR_MASK1) ||
+	    (port_attr->comp_mask & ~IBV_QUERY_PORT_EX_ATTR_MASKS) ||
+	    (port_attr->mask1 & ~IBV_QUERY_PORT_EX_MASK)) {
+		return EINVAL;
+	}
 
-	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
+	/* Optimize the link type query */
+	if (port_attr->comp_mask == IBV_QUERY_PORT_EX_ATTR_MASK1) {
+		if (!(port_attr->mask1 & ~(IBV_QUERY_PORT_EX_LINK_LAYER |
+					   IBV_QUERY_PORT_EX_CAP_FLAGS))) {
+			struct mlx4_context *mctx = to_mctx(context);
+			if (port_num <= 0 || port_num > MLX4_PORTS_NUM)
+				return EINVAL;
+			if (mctx->port_query_cache[port_num - 1].valid) {
+				if (port_attr->mask1 &
+				    IBV_QUERY_PORT_EX_LINK_LAYER)
+					port_attr->link_layer =
+						mctx->
+						port_query_cache[port_num - 1].
+						link_layer;
+				if (port_attr->mask1 &
+				    IBV_QUERY_PORT_EX_CAP_FLAGS)
+					port_attr->port_cap_flags =
+						mctx->
+						port_query_cache[port_num - 1].
+						caps;
+				return 0;
+			}
+		}
+		if (port_attr->mask1 & IBV_QUERY_PORT_EX_STD_MASK) {
+			return mlx4_query_port(context, port_num,
+					       &port_attr->port_attr);
+		}
+	}
+
+	return EOPNOTSUPP;
 }
 
 struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context)
@@ -824,15 +879,18 @@ static struct ibv_ah *mlx4_create_ah_common(struct ibv_pd *pd,
 struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
 {
 	struct ibv_ah *ah;
-	struct ibv_port_attr port_attr;
+	struct ibv_port_attr_ex port_attr;
+
+	port_attr.comp_mask = IBV_QUERY_PORT_EX_ATTR_MASK1;
+	port_attr.mask1 = IBV_QUERY_PORT_EX_LINK_LAYER;
 
-	if (ibv_query_port(pd->context, attr->port_num, &port_attr))
+	if (ibv_query_port_ex(pd->context, attr->port_num, &port_attr))
 		return NULL;
 
 	ah = mlx4_create_ah_common(pd, attr, port_attr.link_layer);
 
 	if (NULL != ah &&
 	    (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET ||
-	     !mlx4_resolve_grh_to_l2(pd, to_mah(ah), attr)))
+	    !mlx4_resolve_grh_to_l2(pd, to_mah(ah), attr)))
 		return ah;
 
 	if (ah)
@@ -843,11 +901,14 @@ struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
 struct ibv_ah *mlx4_create_ah_ex(struct ibv_pd *pd,
 				 struct ibv_ah_attr_ex *attr_ex)
 {
-	struct ibv_port_attr port_attr;
+	struct ibv_port_attr_ex port_attr;
 	struct ibv_ah *ah;
 	struct mlx4_ah *mah;
 
-	if (ibv_query_port(pd->context, attr_ex->port_num, &port_attr))
+	port_attr.comp_mask = IBV_QUERY_PORT_EX_ATTR_MASK1;
+	port_attr.mask1 = IBV_QUERY_PORT_EX_LINK_LAYER;
+
+	if (ibv_query_port_ex(pd->context, attr_ex->port_num, &port_attr))
 		return NULL;
 
 	ah = mlx4_create_ah_common(pd, (struct ibv_ah_attr *)attr_ex,