From patchwork Thu Jul 30 14:50:20 2015
X-Patchwork-Submitter: Haggai Eran
X-Patchwork-Id: 6903331
From: Haggai Eran
To: Doug Ledford
Cc: Liran Liss, Haggai Eran, linux-rdma@vger.kernel.org, Jason Gunthorpe,
	Yotam Kenneth, Shachar Raindel, Guy Shapiro
Subject: [PATCH v4 08/14] IB/cma: Helper functions to access port space IDRs
Date: Thu, 30 Jul 2015 17:50:20 +0300
Message-Id: <1438267826-32155-9-git-send-email-haggaie@mellanox.com>
In-Reply-To: <1438267826-32155-1-git-send-email-haggaie@mellanox.com>
References: <1438267826-32155-1-git-send-email-haggaie@mellanox.com>
List-ID: X-Mailing-List: linux-rdma@vger.kernel.org

Add helper functions to access the IDRs by port-space and port number.
Pass around the port-space enum in cma.c instead of using pointers to
port-space IDRs.
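
As a rough before/after sketch of the call-site change (illustration only;
the helper and types shown are the ones introduced in the diff below):

	/* before: callers held a pointer to the port-space IDR */
	bind_list = idr_find(ps, snum);		/* ps is a struct idr * */

	/* after: callers pass the port-space enum and the helper
	 * resolves the matching IDR internally through cma_idr() */
	bind_list = cma_ps_find(ps, snum);	/* ps is enum rdma_port_space */
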
Signed-off-by: Haggai Eran
Signed-off-by: Yotam Kenneth
Signed-off-by: Shachar Raindel
Signed-off-by: Guy Shapiro
---
 drivers/infiniband/core/cma.c | 81 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 60 insertions(+), 21 deletions(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cf5c48b0b7d5..f2d799209412 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -113,6 +113,22 @@ static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
 static DEFINE_IDR(ib_ps);
 
+static struct idr *cma_idr(enum rdma_port_space ps)
+{
+	switch (ps) {
+	case RDMA_PS_TCP:
+		return &tcp_ps;
+	case RDMA_PS_UDP:
+		return &udp_ps;
+	case RDMA_PS_IPOIB:
+		return &ipoib_ps;
+	case RDMA_PS_IB:
+		return &ib_ps;
+	default:
+		return NULL;
+	}
+}
+
 struct cma_device {
 	struct list_head	list;
 	struct ib_device	*device;
@@ -122,11 +138,33 @@ struct cma_device {
 };
 
 struct rdma_bind_list {
-	struct idr		*ps;
+	enum rdma_port_space	ps;
 	struct hlist_head	owners;
 	unsigned short		port;
 };
 
+static int cma_ps_alloc(enum rdma_port_space ps,
+			struct rdma_bind_list *bind_list, int snum)
+{
+	struct idr *idr = cma_idr(ps);
+
+	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);
+}
+
+static struct rdma_bind_list *cma_ps_find(enum rdma_port_space ps, int snum)
+{
+	struct idr *idr = cma_idr(ps);
+
+	return idr_find(idr, snum);
+}
+
+static void cma_ps_remove(enum rdma_port_space ps, int snum)
+{
+	struct idr *idr = cma_idr(ps);
+
+	idr_remove(idr, snum);
+}
+
 enum {
 	CMA_OPTION_AFONLY,
 };
@@ -1069,7 +1107,7 @@ static void cma_release_port(struct rdma_id_private *id_priv)
 	mutex_lock(&lock);
 	hlist_del(&id_priv->node);
 	if (hlist_empty(&bind_list->owners)) {
-		idr_remove(bind_list->ps, bind_list->port);
+		cma_ps_remove(bind_list->ps, bind_list->port);
 		kfree(bind_list);
 	}
 	mutex_unlock(&lock);
@@ -2365,8 +2403,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
 	hlist_add_head(&id_priv->node, &bind_list->owners);
 }
 
-static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
-			  unsigned short snum)
+static int cma_alloc_port(enum rdma_port_space ps,
+			  struct rdma_id_private *id_priv, unsigned short snum)
 {
 	struct rdma_bind_list *bind_list;
 	int ret;
@@ -2375,7 +2413,7 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
 	if (!bind_list)
 		return -ENOMEM;
 
-	ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+	ret = cma_ps_alloc(ps, bind_list, snum);
 	if (ret < 0)
 		goto err;
 
@@ -2388,7 +2426,8 @@ err:
 	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
 }
 
-static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
+static int cma_alloc_any_port(enum rdma_port_space ps,
+			      struct rdma_id_private *id_priv)
 {
 	static unsigned int last_used_port;
 	int low, high, remaining;
@@ -2399,7 +2438,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
 	rover = prandom_u32() % remaining + low;
 retry:
 	if (last_used_port != rover &&
-	    !idr_find(ps, (unsigned short) rover)) {
+	    !cma_ps_find(ps, (unsigned short)rover)) {
 		int ret = cma_alloc_port(ps, id_priv, rover);
 		/*
 		 * Remember previously used port number in order to avoid
@@ -2454,7 +2493,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 	return 0;
 }
 
-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+static int cma_use_port(enum rdma_port_space ps,
+			struct rdma_id_private *id_priv)
 {
 	struct rdma_bind_list *bind_list;
 	unsigned short snum;
@@ -2464,7 +2504,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
 	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
-	bind_list = idr_find(ps, snum);
+	bind_list = cma_ps_find(ps, snum);
 	if (!bind_list) {
 		ret = cma_alloc_port(ps, id_priv, snum);
 	} else {
@@ -2487,25 +2527,24 @@ static int cma_bind_listen(struct rdma_id_private *id_priv)
 	return ret;
 }
 
-static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
+static enum rdma_port_space cma_select_inet_ps(
+		struct rdma_id_private *id_priv)
 {
 	switch (id_priv->id.ps) {
 	case RDMA_PS_TCP:
-		return &tcp_ps;
 	case RDMA_PS_UDP:
-		return &udp_ps;
 	case RDMA_PS_IPOIB:
-		return &ipoib_ps;
 	case RDMA_PS_IB:
-		return &ib_ps;
+		return id_priv->id.ps;
 	default:
-		return NULL;
+
+		return 0;
 	}
 }
 
-static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
+static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
 {
-	struct idr *ps = NULL;
+	enum rdma_port_space ps = 0;
 	struct sockaddr_ib *sib;
 	u64 sid_ps, mask, sid;
 
@@ -2515,15 +2554,15 @@ static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
 	if ((id_priv->id.ps == RDMA_PS_IB) &&
 	    (sid == (RDMA_IB_IP_PS_IB & mask))) {
 		sid_ps = RDMA_IB_IP_PS_IB;
-		ps = &ib_ps;
+		ps = RDMA_PS_IB;
 	} else if (((id_priv->id.ps == RDMA_PS_IB) ||
 		   (id_priv->id.ps == RDMA_PS_TCP)) &&
 		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
 		sid_ps = RDMA_IB_IP_PS_TCP;
-		ps = &tcp_ps;
+		ps = RDMA_PS_TCP;
 	} else if (((id_priv->id.ps == RDMA_PS_IB) ||
 		   (id_priv->id.ps == RDMA_PS_UDP)) &&
 		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
 		sid_ps = RDMA_IB_IP_PS_UDP;
-		ps = &udp_ps;
+		ps = RDMA_PS_UDP;
	}
 
 	if (ps) {
@@ -2536,7 +2575,7 @@ static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
 
 static int cma_get_port(struct rdma_id_private *id_priv)
 {
-	struct idr *ps;
+	enum rdma_port_space ps;
 	int ret;
 
 	if (cma_family(id_priv) != AF_IB)