[v2,1/1] blk-mq: Inline request status checkers

Message ID e6fc239412811140c83de906b75689530661f65d.1569872122.git.asml.silence@gmail.com (mailing list archive)
State New, archived
Series [v2,1/1] blk-mq: Inline request status checkers

Commit Message

Pavel Begunkov Sept. 30, 2019, 7:43 p.m. UTC
From: Pavel Begunkov <asml.silence@gmail.com>

blk_mq_request_completed() and blk_mq_request_started() are
short, so inline them at their call sites.

v2: per Christoph's suggestion, inline them by hand

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 block/blk-mq-tag.c       |  4 ++--
 block/blk-mq.c           | 14 +-------------
 block/blk-mq.h           |  9 ---------
 drivers/block/nbd.c      |  2 +-
 drivers/nvme/host/core.c |  2 +-
 include/linux/blk-mq.h   | 10 ++++++++--
 6 files changed, 13 insertions(+), 28 deletions(-)
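
For reference, the open-coded checks that replace the removed helpers amount to the following. This is a minimal, self-contained userspace sketch with simplified stand-in definitions, not the real blk-mq types:

#include <stdio.h>

/* Simplified stand-ins for the kernel's mq_rq_state and struct request. */
enum mq_rq_state {
	MQ_RQ_IDLE,
	MQ_RQ_IN_FLIGHT,
	MQ_RQ_COMPLETE,
};

struct request {
	enum mq_rq_state state;
};

/* The kernel version reads rq->state with READ_ONCE(). */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return rq->state;
}

int main(void)
{
	struct request rq = { .state = MQ_RQ_COMPLETE };

	/* was: blk_mq_request_started(&rq) */
	if (blk_mq_rq_state(&rq) != MQ_RQ_IDLE)
		printf("request started\n");

	/* was: blk_mq_request_completed(&rq) */
	if (blk_mq_rq_state(&rq) == MQ_RQ_COMPLETE)
		printf("request completed\n");

	return 0;
}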

Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 008388e82b5c..32d63b607391 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -282,7 +282,7 @@  static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * test and set the bit before assining ->rqs[].
 	 */
 	rq = tags->rqs[bitnr];
-	if (rq && blk_mq_request_started(rq))
+	if (rq && blk_mq_rq_state(rq) != MQ_RQ_IDLE)
 		return iter_data->fn(rq, iter_data->data, reserved);
 
 	return true;
@@ -360,7 +360,7 @@  static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
 {
 	unsigned *count = data;
 
-	if (blk_mq_request_completed(rq))
+	if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
 		(*count)++;
 	return true;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29275f5a996f..a30f108d96be 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -663,18 +663,6 @@  bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-int blk_mq_request_started(struct request *rq)
-{
-	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
-}
-EXPORT_SYMBOL_GPL(blk_mq_request_started);
-
-int blk_mq_request_completed(struct request *rq)
-{
-	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
-}
-EXPORT_SYMBOL_GPL(blk_mq_request_completed);
-
 void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -718,7 +706,7 @@  static void __blk_mq_requeue_request(struct request *rq)
 	trace_block_rq_requeue(q, rq);
 	rq_qos_requeue(q, rq);
 
-	if (blk_mq_request_started(rq)) {
+	if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
 		rq->rq_flags &= ~RQF_TIMED_OUT;
 		if (q->dma_drain_size && blk_rq_bytes(rq))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 32c62c64e6c2..eaaca8fc1c28 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -128,15 +128,6 @@  extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_release(struct request_queue *q);
 
-/**
- * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
- * @rq: target request.
- */
-static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
-{
-	return READ_ONCE(rq->state);
-}
-
 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
 					   unsigned int cpu)
 {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ac07e8c94c79..71a13a65c9d1 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -696,7 +696,7 @@  static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 	if (hwq < nbd->tag_set.nr_hw_queues)
 		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
 				       blk_mq_unique_tag_to_tag(tag));
-	if (!req || !blk_mq_request_started(req)) {
+	if (!req || blk_mq_rq_state(req) == MQ_RQ_IDLE) {
 		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
 			tag, req);
 		return ERR_PTR(-ENOENT);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 108f60b46804..3a54677dc5ad 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -292,7 +292,7 @@  bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 				"Cancelling I/O %d", req->tag);
 
 	/* don't abort one completed request */
-	if (blk_mq_request_completed(req))
+	if (blk_mq_rq_state(req) == MQ_RQ_COMPLETE)
 		return true;
 
 	nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0bf056de5cc3..e993ce19a8ec 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -301,9 +301,15 @@  static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
 }
 
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+	return READ_ONCE(rq->state);
+}
 
-int blk_mq_request_started(struct request *rq);
-int blk_mq_request_completed(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);