From patchwork Fri May 18 07:36:16 2018
X-Patchwork-Submitter: Raju Rangoju <rajur@chelsio.com>
X-Patchwork-Id: 10408343
X-Patchwork-Delegate: jgg@ziepe.ca
From: Raju Rangoju <rajur@chelsio.com>
To: jgg@mellanox.com, dledford@redhat.com, linux-rdma@vger.kernel.org
Cc: swise@opengridcomputing.com, bharat@chelsio.com, rajur@chelsio.com
Subject: [PATCH rdma-core 1/2] cxgb4: refactor the flush logic
Date: Fri, 18 May 2018 13:06:16 +0530
Message-Id: <20180518073617.26404-2-rajur@chelsio.com>
X-Mailer: git-send-email 2.12.0
In-Reply-To: <20180518073617.26404-1-rajur@chelsio.com>
References: <20180518073617.26404-1-rajur@chelsio.com>
X-Mailing-List: linux-rdma@vger.kernel.org

Currently, c4iw_flush_qp() assumes the qp lock is held by the caller.
Change it so that c4iw_flush_qp() acquires all the required locks
itself, in one place. The locking hierarchy is: rcq lock first, then
scq lock (if different), then qp lock.

Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
---
 providers/cxgb4/qp.c    | 41 ++++++++++++++++++++++-------------------
 providers/cxgb4/verbs.c | 12 ++++--------
 2 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/providers/cxgb4/qp.c b/providers/cxgb4/qp.c
index af04e3a1..46806341 100644
--- a/providers/cxgb4/qp.c
+++ b/providers/cxgb4/qp.c
@@ -488,44 +488,49 @@ static void update_qp_state(struct c4iw_qp *qhp)
 	qhp->ibv_qp.state = attr.qp_state;
 }
 
-/*
- * Assumes qhp lock is held.
- */
 void c4iw_flush_qp(struct c4iw_qp *qhp)
 {
 	struct c4iw_cq *rchp, *schp;
 	int count;
 
-	if (qhp->wq.flushed)
-		return;
-
-	update_qp_state(qhp);
-
 	rchp = to_c4iw_cq(qhp->ibv_qp.recv_cq);
 	schp = to_c4iw_cq(qhp->ibv_qp.send_cq);
 
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
-	qhp->wq.flushed = 1;
-	pthread_spin_unlock(&qhp->lock);
 
 	/* locking heirarchy: cq lock first, then qp lock.
 	 */
 	pthread_spin_lock(&rchp->lock);
+	if (schp != rchp)
+		pthread_spin_lock(&schp->lock);
 	pthread_spin_lock(&qhp->lock);
+
+	if (qhp->wq.flushed) {
+		pthread_spin_unlock(&qhp->lock);
+		if (rchp != schp)
+			pthread_spin_unlock(&schp->lock);
+		pthread_spin_unlock(&rchp->lock);
+		return;
+	}
+
+	qhp->wq.flushed = 1;
+	t4_set_wq_in_error(&qhp->wq);
+
+	update_qp_state(qhp);
+
 	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
-	pthread_spin_unlock(&qhp->lock);
-	pthread_spin_unlock(&rchp->lock);
-	/* locking heirarchy: cq lock first, then qp lock. */
-	pthread_spin_lock(&schp->lock);
-	pthread_spin_lock(&qhp->lock);
 	if (schp != rchp)
 		c4iw_flush_hw_cq(schp);
+
 	c4iw_flush_sq(qhp);
+
 	pthread_spin_unlock(&qhp->lock);
-	pthread_spin_unlock(&schp->lock);
-	pthread_spin_lock(&qhp->lock);
+	if (schp != rchp)
+		pthread_spin_unlock(&schp->lock);
+	pthread_spin_unlock(&rchp->lock);
+
 }
 
 void c4iw_flush_qps(struct c4iw_dev *dev)
@@ -537,9 +542,7 @@ void c4iw_flush_qps(struct c4iw_dev *dev)
 		struct c4iw_qp *qhp = dev->qpid2ptr[i];
 		if (qhp) {
 			if (!qhp->wq.flushed && t4_wq_in_error(&qhp->wq)) {
-				pthread_spin_lock(&qhp->lock);
 				c4iw_flush_qp(qhp);
-				pthread_spin_unlock(&qhp->lock);
 			}
 		}
 	}
diff --git a/providers/cxgb4/verbs.c b/providers/cxgb4/verbs.c
index 3c493697..988b62a7 100644
--- a/providers/cxgb4/verbs.c
+++ b/providers/cxgb4/verbs.c
@@ -584,9 +584,9 @@ int c4iw_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
 	int ret;
 
 	PDBG("%s enter qp %p new state %d\n", __func__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
 	if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
 		reset_qp(qhp);
@@ -601,9 +601,7 @@ int c4iw_destroy_qp(struct ibv_qp *ibqp)
 	struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device);
 
 	PDBG("%s enter qp %p\n", __func__, ibqp);
-	pthread_spin_lock(&qhp->lock);
 	c4iw_flush_qp(qhp);
-	pthread_spin_unlock(&qhp->lock);
 
 	ret = ibv_cmd_destroy_qp(ibqp);
 	if (ret) {
@@ -635,9 +633,9 @@ int c4iw_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 	int ret;
 
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
 	pthread_spin_unlock(&qhp->lock);
 	return ret;
@@ -659,9 +657,9 @@ int c4iw_attach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 	int ret;
 
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_attach_mcast(ibqp, gid, lid);
 	pthread_spin_unlock(&qhp->lock);
 	return ret;
@@ -673,9 +671,9 @@ int c4iw_detach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
 	int ret;
 
-	pthread_spin_lock(&qhp->lock);
 	if (t4_wq_in_error(&qhp->wq))
 		c4iw_flush_qp(qhp);
+	pthread_spin_lock(&qhp->lock);
 	ret = ibv_cmd_detach_mcast(ibqp, gid, lid);
 	pthread_spin_unlock(&qhp->lock);
 	return ret;
@@ -694,9 +692,7 @@ void c4iw_async_event(struct ibv_async_event *event)
 	case IBV_EVENT_QP_ACCESS_ERR:
 	case IBV_EVENT_PATH_MIG_ERR: {
 		struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);
-		pthread_spin_lock(&qhp->lock);
 		c4iw_flush_qp(qhp);
-		pthread_spin_unlock(&qhp->lock);
 		break;
 	}
 	case IBV_EVENT_SQ_DRAINED:
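
Note for reviewers: the new scheme boils down to "take the CQ locks
before the QP lock in one fixed order, and make the flush idempotent
once all locks are held". The standalone sketch below is illustrative
only; it is not code from the patch, and the types and names (struct
cq, struct qp, flush_qp) are made up rather than the driver's own.

#include <pthread.h>
#include <stdio.h>

struct cq { pthread_spinlock_t lock; };

struct qp {
	pthread_spinlock_t lock;
	struct cq *rcq, *scq;
	int flushed;
};

static void flush_qp(struct qp *qp)
{
	struct cq *rcq = qp->rcq, *scq = qp->scq;

	/* Lock in hierarchy order: rcq, then scq (if distinct), then qp. */
	pthread_spin_lock(&rcq->lock);
	if (scq != rcq)
		pthread_spin_lock(&scq->lock);
	pthread_spin_lock(&qp->lock);

	/* Flush only once; repeat callers see the flag under the locks. */
	if (!qp->flushed) {
		qp->flushed = 1;
		/* ... flush CQs and work queues here ... */
	}

	/* Unlock in the reverse order. */
	pthread_spin_unlock(&qp->lock);
	if (scq != rcq)
		pthread_spin_unlock(&scq->lock);
	pthread_spin_unlock(&rcq->lock);
}

int main(void)
{
	struct cq rcq, scq;
	struct qp qp = { .rcq = &rcq, .scq = &scq };

	pthread_spin_init(&rcq.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&scq.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&qp.lock, PTHREAD_PROCESS_PRIVATE);

	flush_qp(&qp);	/* first call marks the QP flushed */
	flush_qp(&qp);	/* second call is a no-op under the locks */
	printf("flushed = %d\n", qp.flushed);
	return 0;
}

Keeping one acquisition order across every caller is what avoids AB-BA
deadlocks when two threads flush QPs that share completion queues, and
checking the flushed flag only after all locks are held is what lets
the callers in verbs.c drop their own locking around c4iw_flush_qp().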