
[01/12] scsi_transport_srp: Introduce srp_wait_for_queuecommand()

Message ID 5541EE4A.30803@sandisk.com (mailing list archive)
State Rejected

Commit Message

Bart Van Assche April 30, 2015, 8:56 a.m. UTC
Introduce the helper function srp_wait_for_queuecommand().
Move the definition of scsi_request_fn_active(). This patch
does not change any functionality. A second call to
srp_wait_for_queuecommand() will be introduced in the next
patch.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: James Bottomley <JBottomley@Odin.com>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Sebastian Parschauer <sebastian.riemer@profitbricks.com>
Cc: <stable@vger.kernel.org> #v3.13
---
 drivers/scsi/scsi_transport_srp.c | 52 ++++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 23 deletions(-)

Comments

Sagi Grimberg April 30, 2015, 9:32 a.m. UTC | #1
On 4/30/2015 11:56 AM, Bart Van Assche wrote:
> Introduce the helper function srp_wait_for_queuecommand().
> Move the definition of scsi_request_fn_active(). This patch
> does not change any functionality. A second call to
> srp_wait_for_queuecommand() will be introduced in the next
> patch.
>
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: James Bottomley <JBottomley@Odin.com>
> Cc: Sagi Grimberg <sagig@mellanox.com>
> Cc: Sebastian Parschauer <sebastian.riemer@profitbricks.com>
> Cc: <stable@vger.kernel.org> #v3.13
> ---
>   drivers/scsi/scsi_transport_srp.c | 52 ++++++++++++++++++++++-----------------
>   1 file changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
> index ae45bd9..6ce1c48 100644
> --- a/drivers/scsi/scsi_transport_srp.c
> +++ b/drivers/scsi/scsi_transport_srp.c
> @@ -396,6 +396,34 @@ static void srp_reconnect_work(struct work_struct *work)
>   	}
>   }
>
> +/**
> + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
> + * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
> + */
> +static int scsi_request_fn_active(struct Scsi_Host *shost)
> +{
> +	struct scsi_device *sdev;
> +	struct request_queue *q;
> +	int request_fn_active = 0;
> +
> +	shost_for_each_device(sdev, shost) {
> +		q = sdev->request_queue;
> +
> +		spin_lock_irq(q->queue_lock);
> +		request_fn_active += q->request_fn_active;
> +		spin_unlock_irq(q->queue_lock);
> +	}
> +
> +	return request_fn_active;
> +}
> +
> +/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
> +static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
> +{
> +	while (scsi_request_fn_active(shost))
> +		msleep(20);
> +}
> +
>   static void __rport_fail_io_fast(struct srp_rport *rport)
>   {
>   	struct Scsi_Host *shost = rport_to_shost(rport);
> @@ -504,27 +532,6 @@ void srp_start_tl_fail_timers(struct srp_rport *rport)
>   EXPORT_SYMBOL(srp_start_tl_fail_timers);
>
>   /**
> - * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
> - * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
> - */
> -static int scsi_request_fn_active(struct Scsi_Host *shost)
> -{
> -	struct scsi_device *sdev;
> -	struct request_queue *q;
> -	int request_fn_active = 0;
> -
> -	shost_for_each_device(sdev, shost) {
> -		q = sdev->request_queue;
> -
> -		spin_lock_irq(q->queue_lock);
> -		request_fn_active += q->request_fn_active;
> -		spin_unlock_irq(q->queue_lock);
> -	}
> -
> -	return request_fn_active;
> -}
> -
> -/**
>    * srp_reconnect_rport() - reconnect to an SRP target port
>    * @rport: SRP target port.
>    *
> @@ -559,8 +566,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
>   	if (res)
>   		goto out;
>   	scsi_target_block(&shost->shost_gendev);
> -	while (scsi_request_fn_active(shost))
> -		msleep(20);
> +	srp_wait_for_queuecommand(shost);
>   	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
>   	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
>   		 dev_name(&shost->shost_gendev), rport->state, res);
>

Looks good.

Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Christoph Hellwig April 30, 2015, 9:37 a.m. UTC | #2
On Thu, Apr 30, 2015 at 10:56:42AM +0200, Bart Van Assche wrote:
> Introduce the helper function srp_wait_for_queuecommand().
> Move the definition of scsi_request_fn_active(). This patch
> does not change any functionality. A second call to
> srp_wait_for_queuecommand() will be introduced in the next
> patch.

Can we take a step back here?

This isn't something a driver should be doing, so we really should take
care of this in the SCSI midlayer.

Especially given that we don't maintain request_fn_active for blk-mq.
Bart Van Assche April 30, 2015, 10:26 a.m. UTC | #3
On 04/30/15 11:37, Christoph Hellwig wrote:
> On Thu, Apr 30, 2015 at 10:56:42AM +0200, Bart Van Assche wrote:
>> Introduce the helper function srp_wait_for_queuecommand().
>> Move the definition of scsi_request_fn_active(). This patch
>> does not change any functionality. A second call to
>> srp_wait_for_queuecommand() will be introduced in the next
>> patch.
>
> Can we take a step back here?
>
> This isn't something a driver should be doing, so we really should take
> care of this in the SCSI midlayer.
>
> Especially given that we don't maintain request_fn_active for blk-mq.

Hello Christoph,

How about the following:
* Modify scsi_target_block() and scsi_target_unblock(...,
   SDEV_TRANSPORT_OFFLINE) such that these functions wait until
   active queuecommand() calls have finished.
* Ensure that this approach works not only for the traditional
   block layer but also for blk-mq.

With these changes it won't be necessary anymore to have something like 
srp_wait_for_queuecommand() in the SRP transport layer.

Bart.
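
A minimal sketch of what this midlayer approach could look like, assuming
the legacy (non-blk-mq) request_fn_active counter remains the mechanism;
the function names are hypothetical and the polling loop simply mirrors
srp_wait_for_queuecommand() from the patch:

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Hypothetical midlayer helper, sketch only: count scsi_request_fn() callers. */
static int scsi_host_request_fn_active(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	int active = 0;

	shost_for_each_device(sdev, shost) {
		struct request_queue *q = sdev->request_queue;

		/* request_fn_active is only maintained for the legacy request path. */
		spin_lock_irq(q->queue_lock);
		active += q->request_fn_active;
		spin_unlock_irq(q->queue_lock);
	}

	return active;
}

/*
 * Hypothetical variant of scsi_target_block() that also waits until
 * ongoing shost->hostt->queuecommand() calls have returned.
 */
void scsi_target_block_and_wait(struct device *dev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);

	scsi_target_block(dev);
	while (scsi_host_request_fn_active(shost))
		msleep(20);
}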

Sagi Grimberg April 30, 2015, 10:32 a.m. UTC | #4
On 4/30/2015 1:26 PM, Bart Van Assche wrote:
> On 04/30/15 11:37, Christoph Hellwig wrote:
>> On Thu, Apr 30, 2015 at 10:56:42AM +0200, Bart Van Assche wrote:
>>> Introduce the helper function srp_wait_for_queuecommand().
>>> Move the definition of scsi_request_fn_active(). This patch
>>> does not change any functionality. A second call to
>>> srp_wait_for_queuecommand() will be introduced in the next
>>> patch.
>>
>> Can we take a step back here?
>>
>> This isn't something a driver should be doing, so we really should take
>> care of this in the SCSI midlayer.
>>
>> Especially given that we don't maintain request_fn_active for blk-mq.
>
> Hello Christoph,
>
> How about the following:
> * Modify scsi_target_block() and scsi_target_unblock(...,
>    SDEV_TRANSPORT_OFFLINE) such that these functions wait until
>    active queuecommand() calls have finished.

Won't this make scsi_target_unblock() a sleeping function? It might
not fit all the contexts in which scsi_target_unblock() is called.

Bart Van Assche April 30, 2015, 10:58 a.m. UTC | #5
On 04/30/15 12:33, Sagi Grimberg wrote:
> On 4/30/2015 1:26 PM, Bart Van Assche wrote:
>> On 04/30/15 11:37, Christoph Hellwig wrote:
>>> On Thu, Apr 30, 2015 at 10:56:42AM +0200, Bart Van Assche wrote:
>>>> Introduce the helper function srp_wait_for_queuecommand().
>>>> Move the definition of scsi_request_fn_active(). This patch
>>>> does not change any functionality. A second call to
>>>> srp_wait_for_queuecommand() will be introduced in the next
>>>> patch.
>>>
>>> Can we take a step back here?
>>>
>>> This isn't something a driver should be doing, so we really should take
>>> care of this in the SCSI midlayer.
>>>
>>> Especially given that we don't maintain request_fn_active for blk-mq.
>>
>> Hello Christoph,
>>
>> How about the following:
>> * Modify scsi_target_block() and scsi_target_unblock(...,
>>     SDEV_TRANSPORT_OFFLINE) such that these functions wait until
>>     active queuecommand() calls have finished.
>
> Won't this make scsi_target_unblock() a sleeping function? It might
> not fit all the contexts in which scsi_target_unblock() is called.

The only callers in upstream code of scsi_target_block() and 
scsi_target_unblock() I am aware of are the FC, iSCSI and SRP transport 
layers and the drivers that use these transport layers. As far as I can 
see both functions are always called from thread context and without 
holding any spinlocks. A possible alternative to what I proposed in my 
previous e-mail could be to provide a new function that waits for active 
queuecommand() calls and that has to be called explicitly.

Bart.
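
If the explicit-helper route is taken instead, the thread-context
requirement discussed above could be documented in the helper itself; a
sketch with a hypothetical name, reusing the counting loop sketched
earlier and the patch's 20 ms polling interval:

/* Hypothetical explicit midlayer helper, sketch only. */
void scsi_host_wait_for_queuecommand(struct Scsi_Host *shost)
{
	/*
	 * Uses msleep(), so this may only be called from process
	 * context and without holding spinlocks.
	 */
	might_sleep();

	while (scsi_host_request_fn_active(shost))
		msleep(20);
}

Transport drivers would then call such a helper right after
scsi_target_block(), the same way srp_reconnect_rport() calls
srp_wait_for_queuecommand() in the patch below.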


Sagi Grimberg April 30, 2015, 2:13 p.m. UTC | #6
On 4/30/2015 1:58 PM, Bart Van Assche wrote:
> On 04/30/15 12:33, Sagi Grimberg wrote:
>> On 4/30/2015 1:26 PM, Bart Van Assche wrote:
>>> On 04/30/15 11:37, Christoph Hellwig wrote:
>>>> On Thu, Apr 30, 2015 at 10:56:42AM +0200, Bart Van Assche wrote:
>>>>> Introduce the helper function srp_wait_for_queuecommand().
>>>>> Move the definition of scsi_request_fn_active(). This patch
>>>>> does not change any functionality. A second call to
>>>>> srp_wait_for_queuecommand() will be introduced in the next
>>>>> patch.
>>>>
>>>> Can we take a step back here?
>>>>
>>>> This isn't something a driver should be doing, so we really should take
>>>> care of this in the SCSI midlayer.
>>>>
>>>> Especially given that we don't maintain request_fn_active for blk-mq.
>>>
>>> Hello Christoph,
>>>
>>> How about the following:
>>> * Modify scsi_target_block() and scsi_target_unblock(...,
>>>     SDEV_TRANSPORT_OFFLINE) such that these functions wait until
>>>     active queuecommand() calls have finished.
>>
>> Won't this make scsi_target_unblock() a sleeping function? It might
>> not fit all the contexts in which scsi_target_unblock() is called.
>
> The only callers in upstream code of scsi_target_block() and
> scsi_target_unblock() I am aware of are the FC, iSCSI and SRP transport
> layers and the drivers that use these transport layers. As far as I can
> see both functions are always called from thread context and without
> holding any spinlocks. A possible alternative to what I proposed in my
> previous e-mail could be to provide a new function that waits for active
> queuecommand() calls and that has to be called explicitly.
>

That might be better.
Christoph Hellwig April 30, 2015, 5:25 p.m. UTC | #7
On Thu, Apr 30, 2015 at 12:58:50PM +0200, Bart Van Assche wrote:
> The only callers in upstream code of scsi_target_block() and
> scsi_target_unblock() I am aware of are the FC, iSCSI and SRP transport
> layers and the drivers that use these transport layers. As far as I can see
> both functions are always called from thread context and without holding any
> spinlocks. A possible alternative to what I proposed in my previous e-mail
> could be to provide a new function that waits for active queuecommand()
> calls and that has to be called explicitly.

A separate helper sounds fine for now, although I suspect we'll
eventually migrate the call to it into scsi_target_block().

Patch

diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index ae45bd9..6ce1c48 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -396,6 +396,34 @@  static void srp_reconnect_work(struct work_struct *work)
 	}
 }
 
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
+ */
+static int scsi_request_fn_active(struct Scsi_Host *shost)
+{
+	struct scsi_device *sdev;
+	struct request_queue *q;
+	int request_fn_active = 0;
+
+	shost_for_each_device(sdev, shost) {
+		q = sdev->request_queue;
+
+		spin_lock_irq(q->queue_lock);
+		request_fn_active += q->request_fn_active;
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	return request_fn_active;
+}
+
+/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
+static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
+{
+	while (scsi_request_fn_active(shost))
+		msleep(20);
+}
+
 static void __rport_fail_io_fast(struct srp_rport *rport)
 {
 	struct Scsi_Host *shost = rport_to_shost(rport);
@@ -504,27 +532,6 @@  void srp_start_tl_fail_timers(struct srp_rport *rport)
 EXPORT_SYMBOL(srp_start_tl_fail_timers);
 
 /**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
-	struct scsi_device *sdev;
-	struct request_queue *q;
-	int request_fn_active = 0;
-
-	shost_for_each_device(sdev, shost) {
-		q = sdev->request_queue;
-
-		spin_lock_irq(q->queue_lock);
-		request_fn_active += q->request_fn_active;
-		spin_unlock_irq(q->queue_lock);
-	}
-
-	return request_fn_active;
-}
-
-/**
  * srp_reconnect_rport() - reconnect to an SRP target port
  * @rport: SRP target port.
  *
@@ -559,8 +566,7 @@  int srp_reconnect_rport(struct srp_rport *rport)
 	if (res)
 		goto out;
 	scsi_target_block(&shost->shost_gendev);
-	while (scsi_request_fn_active(shost))
-		msleep(20);
+	srp_wait_for_queuecommand(shost);
 	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
 	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
 		 dev_name(&shost->shost_gendev), rport->state, res);