diff mbox series

[05/13] nvme-pci: consolidate code for polling non-dedicated queues

Message ID 20181202164628.1116-6-hch@lst.de (mailing list archive)
State New, archived
Headers show
Series [01/13] block: move queues types to the block layer | expand

Commit Message

Christoph Hellwig Dec. 2, 2018, 4:46 p.m. UTC
We have three places that can poll for I/O completions on a normal
interrupt-enabled queue.  All of them are in slow path code, so
consolidate them to a single helper that uses spin_lock_irqsave and
removes the fast path cqe_pending check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/pci.c | 35 ++++++++++++-----------------------
 1 file changed, 12 insertions(+), 23 deletions(-)

Comments

Sagi Grimberg Dec. 4, 2018, 12:58 a.m. UTC | #1
> +static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)

Do we still need to carry the tag around?

Other than that,

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Christoph Hellwig Dec. 4, 2018, 3:04 p.m. UTC | #2
On Mon, Dec 03, 2018 at 04:58:25PM -0800, Sagi Grimberg wrote:
>
>> +static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
>
> Do we still need to carry the tag around?

Yes, the timeout handler polls for a specific tag.
Sagi Grimberg Dec. 4, 2018, 5:13 p.m. UTC | #3
>>> +static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
>>
>> Do we still need to carry the tag around?
> 
> Yes, the timeout handler polls for a specific tag.

Does it have to? the documentation suggests that we missed
an interrupt, so it is probably waiting on the completion queue.

I'd say that it'd be better if the tag search would be implemented
on the timeout handler alone so we don't have to pass the tag around
for everyone... Thoughts?
Jens Axboe Dec. 4, 2018, 6:19 p.m. UTC | #4
On 12/4/18 10:13 AM, Sagi Grimberg wrote:
> 
>>>> +static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
>>>
>>> Do we still need to carry the tag around?
>>
>> Yes, the timeout handler polls for a specific tag.
> 
> Does it have to? the documentation suggests that we missed
> an interrupt, so it is probably waiting on the completion queue.
> 
> I'd say that it'd be better if the tag search would be implemented
> on the timeout handler alone so we don't have to pass the tag around
> for everyone... Thoughts?

Without that you don't know if that's the request that completed. You
have to be able to look that up from the timeout handler.
diff mbox series

Patch

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d42bb76e5e78..10c26a2e355a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1072,17 +1072,19 @@  static irqreturn_t nvme_irq_check(int irq, void *data)
 	return IRQ_NONE;
 }
 
-static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
+/*
+ * Poll for completions on any queue, including those not dedicated to polling.
+ * Can be called from any context.
+ */
+static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
 {
+	unsigned long flags;
 	u16 start, end;
 	int found;
 
-	if (!nvme_cqe_pending(nvmeq))
-		return 0;
-
-	spin_lock_irq(&nvmeq->cq_lock);
+	spin_lock_irqsave(&nvmeq->cq_lock, flags);
 	found = nvme_process_cq(nvmeq, &start, &end, tag);
-	spin_unlock_irq(&nvmeq->cq_lock);
+	spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
 
 	nvme_complete_cqes(nvmeq, start, end);
 	return found;
@@ -1279,7 +1281,7 @@  static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	/*
 	 * Did we miss an interrupt?
 	 */
-	if (__nvme_poll(nvmeq, req->tag)) {
+	if (nvme_poll_irqdisable(nvmeq, req->tag)) {
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, completion polled\n",
 			 req->tag, nvmeq->qid);
@@ -1406,18 +1408,13 @@  static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = &dev->queues[0];
-	u16 start, end;
 
 	if (shutdown)
 		nvme_shutdown_ctrl(&dev->ctrl);
 	else
 		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
-	spin_lock_irq(&nvmeq->cq_lock);
-	nvme_process_cq(nvmeq, &start, &end, -1);
-	spin_unlock_irq(&nvmeq->cq_lock);
-
-	nvme_complete_cqes(nvmeq, start, end);
+	nvme_poll_irqdisable(nvmeq, -1);
 }
 
 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -2217,17 +2214,9 @@  static void nvme_del_queue_end(struct request *req, blk_status_t error)
 static void nvme_del_cq_end(struct request *req, blk_status_t error)
 {
 	struct nvme_queue *nvmeq = req->end_io_data;
-	u16 start, end;
 
-	if (!error) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&nvmeq->cq_lock, flags);
-		nvme_process_cq(nvmeq, &start, &end, -1);
-		spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
-
-		nvme_complete_cqes(nvmeq, start, end);
-	}
+	if (!error)
+		nvme_poll_irqdisable(nvmeq, -1);
 
 	nvme_del_queue_end(req, error);
 }