diff mbox series

[v2] blk-mq: don't call ktime_get_ns() if we don't need it

Message ID 73f58a60-cfa8-5736-d42a-9297601a1f71@kernel.dk (mailing list archive)
State New, archived
Headers show
Series [v2] blk-mq: don't call ktime_get_ns() if we don't need it | expand

Commit Message

Jens Axboe Nov. 30, 2018, 9:13 p.m. UTC
We only need the request fields and the end_io time if we have
stats enabled, or if we have a scheduler attached as those may
use it for completion time stats.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

---

v2: add helper, use it in both spots. also clear ->start_time_ns
    so merging doesn't read garbage.

Comments

Omar Sandoval Dec. 3, 2018, 8:22 p.m. UTC | #1
On Fri, Nov 30, 2018 at 02:13:54PM -0700, Jens Axboe wrote:
> We only need the request fields and the end_io time if we have
> stats enabled, or if we have a scheduler attached as those may
> use it for completion time stats.

Reviewed-by: Omar Sandoval <osandov@fb.com>

> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> 
> ---
> 
> v2: add helper, use it in both spots. also clear ->start_time_ns
>     so merging doesn't read garbage.
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 7dcef565dc0f..e09d7f500077 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -281,6 +281,15 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
>  }
>  EXPORT_SYMBOL(blk_mq_can_queue);
>  
> +/*
> + * Only need start/end time stamping if we have stats enabled, or using
> + * an IO scheduler.
> + */
> +static inline bool blk_mq_need_time_stamp(struct request *rq)
> +{
> +	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
> +}
> +
>  static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
>  		unsigned int tag, unsigned int op)
>  {
> @@ -316,7 +325,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
>  	RB_CLEAR_NODE(&rq->rb_node);
>  	rq->rq_disk = NULL;
>  	rq->part = NULL;
> -	rq->start_time_ns = ktime_get_ns();
> +	if (blk_mq_need_time_stamp(rq))
> +		rq->start_time_ns = ktime_get_ns();
> +	else
> +		rq->start_time_ns = 0;
>  	rq->io_start_time_ns = 0;
>  	rq->nr_phys_segments = 0;
>  #if defined(CONFIG_BLK_DEV_INTEGRITY)
> @@ -522,7 +534,10 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
>  
>  inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
>  {
> -	u64 now = ktime_get_ns();
> +	u64 now = 0;
> +
> +	if (blk_mq_need_time_stamp(rq))
> +		now = ktime_get_ns();
>  
>  	if (rq->rq_flags & RQF_STATS) {
>  		blk_mq_poll_stats_start(rq->q);
> -- 
> Jens Axboe
>
diff mbox series

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7dcef565dc0f..e09d7f500077 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -281,6 +281,15 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
+/*
+ * Only need start/end time stamping if we have stats enabled, or using
+ * an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+	return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
+}
+
 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 		unsigned int tag, unsigned int op)
 {
@@ -316,7 +325,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->rq_disk = NULL;
 	rq->part = NULL;
-	rq->start_time_ns = ktime_get_ns();
+	if (blk_mq_need_time_stamp(rq))
+		rq->start_time_ns = ktime_get_ns();
+	else
+		rq->start_time_ns = 0;
 	rq->io_start_time_ns = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -522,7 +534,10 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
-	u64 now = ktime_get_ns();
+	u64 now = 0;
+
+	if (blk_mq_need_time_stamp(rq))
+		now = ktime_get_ns();
 
 	if (rq->rq_flags & RQF_STATS) {
 		blk_mq_poll_stats_start(rq->q);