@@ -46,6 +46,20 @@ static DEFINE_MUTEX(scsi_sense_cache_mutex);
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
+/* For the admin queue, queuedata is NULL */
+static inline bool scsi_is_admin_queue(struct request_queue *q)
+{
+ return !q->queuedata;
+}
+
+/* This helper can only be used in the request prep stage */
+static inline struct scsi_device *scsi_get_scsi_dev(struct request *rq)
+{
+ if (scsi_is_admin_queue(rq->q))
+ return scsi_req(rq)->sdev;
+ return rq->q->queuedata;
+}
+
static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
@@ -1376,10 +1390,9 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
}
static int
-scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+scsi_prep_return(struct scsi_device *sdev, struct request_queue *q,
+ struct request *req, int ret)
{
- struct scsi_device *sdev = q->queuedata;
-
switch (ret) {
case BLKPREP_KILL:
case BLKPREP_INVALID:
@@ -1411,7 +1424,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
- struct scsi_device *sdev = q->queuedata;
+ struct scsi_device *sdev = scsi_get_scsi_dev(req);
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
int ret;
@@ -1436,7 +1449,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
ret = scsi_setup_cmnd(sdev, req);
out:
- return scsi_prep_return(q, req, ret);
+ return scsi_prep_return(sdev, q, req, ret);
}
static void scsi_unprep_fn(struct request_queue *q, struct request *req)
@@ -1613,6 +1626,9 @@ static int scsi_lld_busy(struct request_queue *q)
if (blk_queue_dying(q))
return 0;
+ if (WARN_ON_ONCE(scsi_is_admin_queue(q)))
+ return 0;
+
shost = sdev->host;
/*
@@ -1816,7 +1832,7 @@ static void scsi_request_fn(struct request_queue *q)
__releases(q->queue_lock)
__acquires(q->queue_lock)
{
- struct scsi_device *sdev = q->queuedata;
+ struct scsi_device *sdev;
struct Scsi_Host *shost;
struct scsi_cmnd *cmd;
struct request *req;
@@ -1825,7 +1841,6 @@ static void scsi_request_fn(struct request_queue *q)
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
*/
- shost = sdev->host;
for (;;) {
int rtn;
/*
@@ -1837,6 +1852,10 @@ static void scsi_request_fn(struct request_queue *q)
if (!req)
break;
+ cmd = blk_mq_rq_to_pdu(req);
+ sdev = cmd->device;
+ shost = sdev->host;
+
if (unlikely(!scsi_device_online(sdev))) {
sdev_printk(KERN_ERR, sdev,
"rejecting I/O to offline device\n");
@@ -1854,7 +1873,6 @@ static void scsi_request_fn(struct request_queue *q)
blk_start_request(req);
spin_unlock_irq(q->queue_lock);
- cmd = blk_mq_rq_to_pdu(req);
if (cmd != req->special) {
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
@@ -2332,6 +2350,9 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
struct scsi_device *sdev = NULL;
+ /* admin queue won't be exposed to external users */
+ WARN_ON_ONCE(scsi_is_admin_queue(q));
+
if (q->mq_ops) {
if (q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
Uses scsi_is_admin_queue() and scsi_get_scsi_dev() to retrieve 'scsi_device' for legacy path. The same approach can be used in SCSI_MQ path too, just not very efficiently, and will deal with that in the patch when introducing admin queue for SCSI_MQ. Cc: Alan Stern <stern@rowland.harvard.edu> Cc: Christoph Hellwig <hch@lst.de> Cc: Bart Van Assche <bart.vanassche@wdc.com> Cc: Jianchao Wang <jianchao.w.wang@oracle.com> Cc: Hannes Reinecke <hare@suse.de> Cc: Johannes Thumshirn <jthumshirn@suse.de> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com> Cc: "Martin K. Petersen" <martin.petersen@oracle.com> Cc: linux-scsi@vger.kernel.org Signed-off-by: Ming Lei <ming.lei@redhat.com> --- drivers/scsi/scsi_lib.c | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-)