From patchwork Wed Jan 5 20:35:15 2011
From: David Dillow
To: linux-rdma@vger.kernel.org
Cc: linux-scsi@vger.kernel.org, bvanassche@acm.org
Subject: [PATCH v2 7/8] IB/srp: stop sharing the host lock with SCSI
Date: Wed, 5 Jan 2011 15:35:15 -0500
Message-Id: <1294259716-30706-8-git-send-email-dillowda@ornl.gov>
In-Reply-To: <1294259716-30706-1-git-send-email-dillowda@ornl.gov>
References: <1294259716-30706-1-git-send-email-dillowda@ornl.gov>

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 15c3ae3..2f62367 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -447,12 +447,12 @@ static bool srp_change_state(struct srp_target_port *target,
 {
 	bool changed = false;
 
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == old) {
 		target->state = new;
 		changed = true;
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 	return changed;
 }
 
@@ -555,11 +555,11 @@ static void srp_remove_req(struct srp_target_port *target,
 	unsigned long flags;
 
 	srp_unmap_data(req->scmnd, target, req);
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_lim_delta;
 	req->scmnd = NULL;
 	list_add_tail(&req->list, &target->free_reqs);
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
@@ -634,13 +634,13 @@ err:
 	 * Schedule our work inside the lock to avoid a race with
 	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -829,17 +829,16 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	list_add(&iu->list, &target->free_tx);
 	if (iu_type != SRP_IU_RSP)
 		++target->req_lim;
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx. If IU is not sent, it must be returned using
- * srp_put_tx_iu().
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -920,9 +919,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		spin_lock_irqsave(target->scsi_host->host_lock, flags);
+		spin_lock_irqsave(&target->lock, flags);
 		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
-		spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+		spin_unlock_irqrestore(&target->lock, flags);
 
 		target->tsk_mgmt_status = -1;
 		if (be32_to_cpu(rsp->resp_data_len) >= 4)
@@ -963,10 +962,10 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 	struct srp_iu *iu;
 	int err;
 
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1131,14 +1130,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		return 0;
 	}
 
-	spin_lock_irqsave(shost->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (iu) {
 		req = list_first_entry(&target->free_reqs, struct srp_request,
				       list);
 		list_del(&req->list);
 	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 	if (!iu)
 		goto err;
@@ -1184,9 +1183,9 @@ err_unmap:
 err_iu:
 	srp_put_tx_iu(target, iu, SRP_IU_CMD);
 
-	spin_lock_irqsave(shost->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	list_add(&req->list, &target->free_reqs);
-	spin_unlock_irqrestore(shost->host_lock, flags);
+	spin_unlock_irqrestore(&target->lock, flags);
 
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1451,9 +1450,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
 	init_completion(&target->tsk_mgmt_done);
 
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	if (!iu)
 		return -1;
@@ -1957,6 +1956,7 @@ static ssize_t srp_create_target(struct device *dev,
 	target->scsi_host = target_host;
 	target->srp_host = host;
 
+	spin_lock_init(&target->lock);
 	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
@@ -2186,9 +2186,9 @@ static void srp_remove_one(struct ib_device *device)
 		 */
 		spin_lock(&host->target_lock);
 		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(target->scsi_host->host_lock);
+			spin_lock_irq(&target->lock);
 			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(target->scsi_host->host_lock);
+			spin_unlock_irq(&target->lock);
 		}
 		spin_unlock(&host->target_lock);
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 81686ee..acb435d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -144,6 +144,8 @@ struct srp_target_port {
 	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 
+	spinlock_t		lock;
+
 	struct list_head	free_tx;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
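
For readers skimming the diff, here is a condensed sketch (not part of the patch) of
the locking pattern it introduces: each SRP target now carries a private spinlock that
guards only driver-owned state (req_lim, free_tx, free_reqs), so the submission and
completion paths no longer take the SCSI midlayer's shost->host_lock at all. The struct
and function names below (demo_target, demo_target_init, demo_put_req) are hypothetical
illustrations; only the field names and locking primitives mirror the driver.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Reduced stand-in for struct srp_target_port; illustration only. */
struct demo_target {
	spinlock_t		lock;		/* protects the three fields below */
	s32			req_lim;	/* request credits granted by the target */
	struct list_head	free_tx;	/* unused transmit IUs */
	struct list_head	free_reqs;	/* unused request slots */
};

/* Mirrors the spin_lock_init()/INIT_LIST_HEAD() calls in srp_create_target(). */
static void demo_target_init(struct demo_target *t)
{
	spin_lock_init(&t->lock);
	INIT_LIST_HEAD(&t->free_tx);
	INIT_LIST_HEAD(&t->free_reqs);
}

/* Return a request slot and its credits, as srp_remove_req() does after the patch. */
static void demo_put_req(struct demo_target *t, struct list_head *req, s32 req_lim_delta)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);	/* private per-target lock, not shost->host_lock */
	t->req_lim += req_lim_delta;
	list_add_tail(req, &t->free_reqs);
	spin_unlock_irqrestore(&t->lock, flags);
}

As the updated comment above __srp_get_tx_iu() notes, the same lock covers the get/put
pairing for transmit IUs: __srp_get_tx_iu() must be called with target->lock held, and
an IU that is not sent goes back through srp_put_tx_iu(), which takes the lock itself.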