@@ -1250,6 +1250,12 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
break;
case SDEV_BLOCK:
case SDEV_CREATED_BLOCK:
+ /* caller holds the queue_lock in the non-mq case, so blk_stop_queue() is safe here */
+ if (req->q->mq_ops)
+ blk_mq_stop_hw_queues(req->q);
+ else
+ blk_stop_queue(req->q);
+
ret = BLKPREP_DEFER;
break;
case SDEV_QUIESCE:
@@ -2611,7 +2617,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_QUIESCE:
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
- case SDEV_BLOCK:
break;
default:
goto illegal;
@@ -2844,10 +2849,12 @@ static int scsi_request_fn_active(struct scsi_device *sdev)
*/
static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
{
- WARN_ON_ONCE(sdev->host->use_blk_mq);
-
- while (scsi_request_fn_active(sdev))
- msleep(20);
+ if (sdev->request_queue->mq_ops) {
+ synchronize_rcu();
+ } else {
+ while (scsi_request_fn_active(sdev))
+ msleep(20);
+ }
}
/**
@@ -2953,8 +2960,6 @@ EXPORT_SYMBOL(scsi_target_resume);
int
scsi_internal_device_block(struct scsi_device *sdev, bool wait)
{
- struct request_queue *q = sdev->request_queue;
- unsigned long flags;
int err = 0;
err = scsi_device_set_state(sdev, SDEV_BLOCK);
@@ -2970,23 +2975,27 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
* block layer from calling the midlayer with this device's
* request queue.
*/
+ if (wait)
+ scsi_wait_for_queuecommand(sdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_internal_device_block);
+
+void scsi_start_queue(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ unsigned long flags;
+
if (q->mq_ops) {
- if (wait)
- blk_mq_quiesce_queue(q);
- else
- blk_mq_stop_hw_queues(q);
+ blk_mq_start_stopped_hw_queues(q, false);
} else {
spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
+ blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (wait)
- scsi_wait_for_queuecommand(sdev);
}
-
- return 0;
}
-EXPORT_SYMBOL_GPL(scsi_internal_device_block);
-
+
/**
* scsi_internal_device_unblock - resume a device after a block request
* @sdev: device to resume
@@ -3007,9 +3016,6 @@ int
scsi_internal_device_unblock(struct scsi_device *sdev,
enum scsi_device_state new_state)
{
- struct request_queue *q = sdev->request_queue;
- unsigned long flags;
-
/*
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
@@ -3027,13 +3033,7 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
- if (q->mq_ops) {
- blk_mq_start_stopped_hw_queues(q, false);
- } else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ scsi_start_queue(sdev);
return 0;
}
@@ -89,6 +89,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern int scsi_init_queue(void);
@@ -1282,8 +1282,17 @@ void __scsi_remove_device(struct scsi_device *sdev)
return;
if (sdev->is_visible) {
- if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
- return;
+ /*
+ * If blocked, transition straight to DEL so that any
+ * commands issued during driver shutdown (e.g. sync
+ * cache) are errored out rather than left queued.
+ */
+ if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) {
+ if (scsi_device_set_state(sdev, SDEV_DEL) != 0)
+ return;
+
+ scsi_start_queue(sdev);
+ }
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);