@@ -1893,6 +1893,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
return q;
}
+EXPORT_SYMBOL_GPL(scsi_mq_alloc_queue);
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
@@ -2568,7 +2569,6 @@ scsi_device_quiesce(struct scsi_device *sdev)
return err;
}
-EXPORT_SYMBOL(scsi_device_quiesce);
/**
* scsi_device_resume - Restart user issued commands to a quiesced device.
@@ -2597,30 +2597,30 @@ void scsi_device_resume(struct scsi_device *sdev)
EXPORT_SYMBOL(scsi_device_resume);
static void
-device_quiesce_fn(struct scsi_device *sdev, void *data)
+device_freeze_fn(struct scsi_device *sdev, void *data)
{
- scsi_device_quiesce(sdev);
+ blk_mq_freeze_queue(sdev->request_queue);
}
void
-scsi_target_quiesce(struct scsi_target *starget)
+scsi_target_freeze(struct scsi_target *starget)
{
- starget_for_each_device(starget, NULL, device_quiesce_fn);
+ starget_for_each_device(starget, NULL, device_freeze_fn);
}
-EXPORT_SYMBOL(scsi_target_quiesce);
+EXPORT_SYMBOL(scsi_target_freeze);
static void
-device_resume_fn(struct scsi_device *sdev, void *data)
+device_unfreeze_fn(struct scsi_device *sdev, void *data)
{
- scsi_device_resume(sdev);
+ blk_mq_unfreeze_queue(sdev->request_queue);
}
void
-scsi_target_resume(struct scsi_target *starget)
+scsi_target_unfreeze(struct scsi_target *starget)
{
- starget_for_each_device(starget, NULL, device_resume_fn);
+ starget_for_each_device(starget, NULL, device_unfreeze_fn);
}
-EXPORT_SYMBOL(scsi_target_resume);
+EXPORT_SYMBOL(scsi_target_unfreeze);
/**
* scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
@@ -96,6 +96,8 @@ extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
+extern int scsi_device_quiesce(struct scsi_device *sdev);
+extern void scsi_device_resume(struct scsi_device *sdev);
struct request_queue;
struct request;
@@ -997,59 +997,79 @@ void
spi_dv_device(struct scsi_device *sdev)
{
struct scsi_target *starget = sdev->sdev_target;
+ struct request_queue *q2;
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
/*
- * Because this function and the power management code both call
- * scsi_device_quiesce(), it is not safe to perform domain validation
- * while suspend or resume is in progress. Hence the
- * lock/unlock_system_sleep() calls.
+ * Because this function creates a new request queue that is not
+ * visible to the rest of the system, this function must be serialized
+ * against suspend, resume and runtime power management. Hence the
+ * lock/unlock_system_sleep() and scsi_autopm_{get,put}_device()
+ * calls.
*/
lock_system_sleep();
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
+
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
-
- spi_dv_in_progress(starget) = 1;
+ goto put_autopm;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
-
- /* We need to verify that the actual device will quiesce; the
- * later target quiesce is just a nice to have */
- if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
-
- scsi_target_quiesce(starget);
+ goto put_sdev;
spi_dv_pending(starget) = 1;
+
mutex_lock(&spi_dv_mutex(starget));
+ if (unlikely(spi_dv_in_progress(starget)))
+ goto clear_pending;
+
+ spi_dv_in_progress(starget) = 1;
starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n");
- spi_dv_device_internal(sdev, sdev->request_queue, buffer);
+ q2 = scsi_mq_alloc_queue(sdev);
+
+ if (q2) {
+ /*
+ * Freeze the target such that no other subsystem can submit
+ * SCSI commands to 'sdev'. Submitting SCSI commands through
+ * q2 may trigger the SCSI error handler. The SCSI error
+ * handler must be able to handle a frozen sdev->request_queue
+ * and must also use blk_mq_rq_from_pdu(scsi_cmnd)->q instead of
+ * sdev->request_queue if it would be necessary to access q2
+ * directly.
+ */
+ scsi_target_freeze(starget);
+ spi_dv_device_internal(sdev, q2, buffer);
+ blk_cleanup_queue(q2);
+ scsi_target_unfreeze(starget);
+ }
starget_printk(KERN_INFO, starget, "Ending Domain Validation\n");
- mutex_unlock(&spi_dv_mutex(starget));
- spi_dv_pending(starget) = 0;
-
- scsi_target_resume(starget);
-
spi_initial_dv(starget) = 1;
+ spi_dv_in_progress(starget) = 0;
+
+clear_pending:
+ spi_dv_pending(starget) = 0;
+ mutex_unlock(&spi_dv_mutex(starget));
- out_free:
kfree(buffer);
- out_put:
- spi_dv_in_progress(starget) = 0;
+
+put_sdev:
scsi_device_put(sdev);
-unlock:
+
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
@@ -423,10 +423,8 @@ extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt);
extern void sdev_evt_send_simple(struct scsi_device *sdev,
enum scsi_device_event evt_type, gfp_t gfpflags);
-extern int scsi_device_quiesce(struct scsi_device *sdev);
-extern void scsi_device_resume(struct scsi_device *sdev);
-extern void scsi_target_quiesce(struct scsi_target *);
-extern void scsi_target_resume(struct scsi_target *);
+extern void scsi_target_freeze(struct scsi_target *);
+extern void scsi_target_unfreeze(struct scsi_target *);
extern void scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun,
enum scsi_scan_mode rescan);