
[5/6] mtip32xx: convert internal command issue to block IO path

Message ID 1493389911-19512-6-git-send-email-axboe@fb.com (mailing list archive)
State New, archived

Commit Message

Jens Axboe April 28, 2017, 2:31 p.m. UTC
The driver special cases certain things for command issue, depending
on whether it's an internal command or not. Make the internal commands
use the regular infrastructure for issuing IO.

Since this is an 8-group souped up AHCI variant, we have to deal
with NCQ vs non-queueable commands. Do this from the queue_rq
handler, by backing off unless the drive is idle.
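
In condensed form, the new issue path looks roughly like this (helper
names as used in the patch; command table setup and error handling
elided):

	/* ->queue_rq: reserved (internal) requests take their own path */
	if (rq->rq_flags & RQF_RESERVED)
		return mtip_issue_reserved_cmd(hctx, rq);

	/* ... which backs off until the drive has drained its NCQ
	 * commands, then issues the FIS as a plain non-NCQ command: */
	if (mtip_commands_active(dd->port))
		return BLK_MQ_RQ_QUEUE_BUSY;

	blk_mq_start_request(rq);
	mtip_issue_non_ncq_command(dd->port, rq->tag);
	return BLK_MQ_RQ_QUEUE_OK;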

Signed-off-by: Jens Axboe <axboe@fb.com>
---
 drivers/block/mtip32xx/mtip32xx.c | 103 +++++++++++++++++++++++++++-----------
 1 file changed, 73 insertions(+), 30 deletions(-)

Comments

Christoph Hellwig April 28, 2017, 2:49 p.m. UTC | #1
> @@ -1088,6 +1088,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
>  	return -EFAULT;
>  }
>  
> +struct mtip_int_cmd {
> +	int fis_len;
> +	dma_addr_t buffer;
> +	int buf_len;
> +	u32 opts;
> +};

I know passing the dma_addr is probably the easier conversion for now,
but using blk_rq_map_kern would be the cleaner way going forward.
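
A minimal sketch of what that could look like, assuming the callers were
changed to pass a kernel buffer rather than a pre-mapped dma_addr_t (not
part of this patch):

	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* let the block layer map the buffer and build the SG list for us,
	 * instead of hand-rolling a single AHCI SG entry from a dma_addr_t */
	if (buf && buf_len) {
		ret = blk_rq_map_kern(dd->queue, rq, buf, buf_len, GFP_KERNEL);
		if (ret) {
			blk_mq_free_request(rq);
			return ret;
		}
	}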

> +	/* insert request and run queue */
> +	blk_execute_rq_nowait(rq->q, NULL, rq, true, NULL);
> +
> +	wait_for_completion(&wait);

Why not blk_execute_rq?
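
(For reference, the synchronous variant would fold the issue and the wait
into one call; sketch only, and it assumes the internal command completes
through the normal end_io path, which, per the reply below, it currently
does not:)

	/* inserts the request, runs the queue and sleeps until rq->end_io */
	blk_execute_rq(rq->q, NULL, rq, true);
	rv = int_cmd->status;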

> @@ -3770,6 +3803,9 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
>  
>  	mtip_init_cmd_header(rq);
>  
> +	if (rq->rq_flags & RQF_RESERVED)

And in fact I don't think we'd even need the helper I suggested before,
we can just check for REQ_OP_DRV_IN here.

But while we're at it - one oddity in mtip32xx is that it converts
discards to an internal command from ->queue_rq, so we end up using
two requests for it.  Just handling discards here would be a nice
improvement.  It would also easily allow the driver to support ranged
trims..

But I guess I'm simply too picky and we should just fix up the worst issues
first..
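
A hypothetical shape for the discard suggestion above; mtip_issue_trim_rq
and mtip_build_trim_payload are made-up names, not existing driver
functions:

	static int mtip_issue_trim_rq(struct blk_mq_hw_ctx *hctx,
				      struct request *rq)
	{
		struct driver_data *dd = hctx->queue->queuedata;

		/* DSM TRIM is not an NCQ command either, so back off until
		 * the drive is idle, like the internal commands do. */
		if (mtip_commands_active(dd->port))
			return BLK_MQ_RQ_QUEUE_BUSY;

		blk_mq_start_request(rq);

		/* build the TRIM range payload in this request's own command
		 * slot; ranged trims would slot in here */
		mtip_build_trim_payload(dd->port, rq);
		mtip_issue_non_ncq_command(dd->port, rq->tag);
		return BLK_MQ_RQ_QUEUE_OK;
	}

with mtip_queue_rq() dispatching to it directly:

	if (req_op(rq) == REQ_OP_DISCARD)
		return mtip_issue_trim_rq(hctx, rq);
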
Jens Axboe April 28, 2017, 4:43 p.m. UTC | #2
On 04/28/2017 08:49 AM, Christoph Hellwig wrote:
>> @@ -1088,6 +1088,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
>>  	return -EFAULT;
>>  }
>>  
>> +struct mtip_int_cmd {
>> +	int fis_len;
>> +	dma_addr_t buffer;
>> +	int buf_len;
>> +	u32 opts;
>> +};
> 
> I know passing the dma_addr is probably the easier conversion for now,
> but using blk_rq_map_kern would be the cleaner way going forward.
> 
>> +	/* insert request and run queue */
>> +	blk_execute_rq_nowait(rq->q, NULL, rq, true, NULL);
>> +
>> +	wait_for_completion(&wait);
> 
> Why not blk_execute_rq?

The internal requests don't complete through the normal end_request
path; their completion handler signals the on-stack completion directly,
so blk_execute_rq() would never be woken as things stand. We can convert
that too, of course, but I think we should keep it simple to start with
and iron the issues out. The same goes for using blk_rq_map_kern().

>> @@ -3770,6 +3803,9 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
>>  
>>  	mtip_init_cmd_header(rq);
>>  
>> +	if (rq->rq_flags & RQF_RESERVED)
> 
> And in fact I don't think we'd even need the helper I suggested before,
> we can just check for REQ_OP_DRV_IN here.

True, we can just use blk_rq_is_passthrough here, I'll do that.
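
That is, the check in mtip_queue_rq() would become something like:

	/* internal commands are allocated as REQ_OP_DRV_IN passthrough
	 * requests, so no RQF_RESERVED check is needed */
	if (blk_rq_is_passthrough(rq))
		return mtip_issue_reserved_cmd(hctx, rq);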

> But while we're at it - one oddity in mtip32xx is that it converts
> discards to an internal command from ->queue_rq, so we end up using
> two requests for it.  Just handling discards here would be a nice
> improvement.  It would also easily allow the driver to support ranged
> trims..
> 
> But I guess I'm simply too picky and we should just fix up the worst issues
> first..

All of these are good suggestions, but yes, I think we should do the
worst issues first, so we can kill the NO_SCHED flag.

Patch

diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index aee94f260725..90f14f799527 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -195,7 +195,7 @@  static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 	if (mtip_check_surprise_removal(dd->pdev))
 		return NULL;
 
-	rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
+	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
 	if (IS_ERR(rq))
 		return NULL;
 
@@ -1088,6 +1088,13 @@  static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
 	return -EFAULT;
 }
 
+struct mtip_int_cmd {
+	int fis_len;
+	dma_addr_t buffer;
+	int buf_len;
+	u32 opts;
+};
+
 /*
  * Execute an internal command and wait for the completion.
  *
@@ -1114,10 +1121,16 @@  static int mtip_exec_internal_command(struct mtip_port *port,
 					u32 opts,
 					unsigned long timeout)
 {
-	struct mtip_cmd_sg *command_sg;
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct mtip_cmd *int_cmd;
 	struct driver_data *dd = port->dd;
+	struct request *rq;
+	struct mtip_int_cmd icmd = {
+		.fis_len = fis_len,
+		.buffer = buffer,
+		.buf_len = buf_len,
+		.opts = opts
+	};
 	int rv = 0;
 	unsigned long start;
 
@@ -1132,6 +1145,8 @@  static int mtip_exec_internal_command(struct mtip_port *port,
 		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
 		return -EFAULT;
 	}
+	rq = blk_mq_rq_from_pdu(int_cmd);
+	rq->end_io_data = &icmd;
 
 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
 
@@ -1158,35 +1173,16 @@  static int mtip_exec_internal_command(struct mtip_port *port,
 	/* Copy the command to the command table */
 	memcpy(int_cmd->command, fis, fis_len*4);
 
-	/* Populate the SG list */
-	int_cmd->command_header->opts =
-		 __force_bit2int cpu_to_le32(opts | fis_len);
-	if (buf_len) {
-		command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
-
-		command_sg->info =
-			__force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
-		command_sg->dba	=
-			__force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
-		command_sg->dba_upper =
-			__force_bit2int cpu_to_le32((buffer >> 16) >> 16);
-
-		int_cmd->command_header->opts |=
-			__force_bit2int cpu_to_le32((1 << 16));
-	}
-
-	/* Populate the command header */
-	int_cmd->command_header->byte_count = 0;
-
 	start = jiffies;
+	rq->timeout = timeout;
 
-	/* Issue the command to the hardware */
-	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+	/* insert request and run queue */
+	blk_execute_rq_nowait(rq->q, NULL, rq, true, NULL);
+
+	wait_for_completion(&wait);
+	rv = int_cmd->status;
 
-	/* Wait for the command to complete or timeout. */
-	rv = wait_for_completion_interruptible_timeout(&wait,
-				msecs_to_jiffies(timeout));
-	if (rv <= 0) {
+	if (rv < 0) {
 		if (rv == -ERESTARTSYS) { /* interrupted */
 			dev_err(&dd->pdev->dev,
 				"Internal command [%02X] was interrupted after %u ms\n",
@@ -1217,7 +1213,6 @@  static int mtip_exec_internal_command(struct mtip_port *port,
 		goto exec_ic_exit;
 	}
 
-	rv = 0;
 	if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 			& (1 << MTIP_TAG_INTERNAL)) {
 		rv = -ENXIO;
@@ -3762,6 +3757,44 @@  static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	return false;
 }
 
+static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+				   struct request *rq)
+{
+	struct driver_data *dd = hctx->queue->queuedata;
+	struct mtip_int_cmd *icmd = rq->end_io_data;
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	struct mtip_cmd_sg *command_sg;
+
+	if (mtip_commands_active(dd->port))
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
+	rq->end_io_data = NULL;
+
+	/* Populate the SG list */
+	cmd->command_header->opts =
+		 __force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
+	if (icmd->buf_len) {
+		command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
+
+		command_sg->info =
+			__force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
+		command_sg->dba	=
+			__force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
+		command_sg->dba_upper =
+			__force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
+
+		cmd->command_header->opts |=
+			__force_bit2int cpu_to_le32((1 << 16));
+	}
+
+	/* Populate the command header */
+	cmd->command_header->byte_count = 0;
+
+	blk_mq_start_request(rq);
+	mtip_issue_non_ncq_command(dd->port, rq->tag);
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 			 const struct blk_mq_queue_data *bd)
 {
@@ -3770,6 +3803,9 @@  static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	mtip_init_cmd_header(rq);
 
+	if (rq->rq_flags & RQF_RESERVED)
+		return mtip_issue_reserved_cmd(hctx, rq);
+
 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
@@ -3825,8 +3861,14 @@  static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
 {
 	struct driver_data *dd = req->q->queuedata;
 
-	if (reserved)
+	if (reserved) {
+		struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
+
+		cmd->status = -ETIME;
+		if (cmd->comp_func)
+			cmd->comp_func(dd->port, MTIP_TAG_INTERNAL, cmd, -ETIME);
 		goto exit_handler;
+	}
 
 	if (test_bit(req->tag, dd->port->cmds_to_issue))
 		goto exit_handler;
@@ -4063,6 +4105,7 @@  static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 	} else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
 
 		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+		cmd->status = -ENODEV;
 		if (cmd->comp_func)
 			cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
 					cmd, -ENODEV);