diff mbox

[V5,15/17] block: track request size in blk_issue_stat

Message ID 76954496f30da4902288fc027a0ed74a232535e5.1481833017.git.shli@fb.com (mailing list archive)
State New, archived
Headers show

Commit Message

Shaohua Li Dec. 15, 2016, 8:33 p.m. UTC
Currently there is no way to know the request size when the request is
finished. The next patch will need this info, so add it to blk_issue_stat.
With this, we will have 49 bits to track time, which is still a very long time.

Signed-off-by: Shaohua Li <shli@fb.com>
---
 block/blk-core.c          |  2 +-
 block/blk-mq.c            |  2 +-
 block/blk-stat.c          |  7 ++++---
 block/blk-stat.h          | 29 +++++++++++++++++++++++------
 block/blk-wbt.h           | 10 +++++-----
 include/linux/blk_types.h |  2 +-
 6 files changed, 35 insertions(+), 17 deletions(-)

Comments

kernel test robot Dec. 16, 2016, 2:01 a.m. UTC | #1
Hi Shaohua,

[auto build test WARNING on block/for-next]
[also build test WARNING on next-20161215]
[cannot apply to v4.9]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Shaohua-Li/blk-throttle-add-low-limit/20161216-093257
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: i386-randconfig-s0-201650 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   block/blk-stat.c: In function 'blk_stat_set_issue':
>> block/blk-stat.c:243:26: warning: left shift count >= width of type [-Wshift-count-overflow]
      (blk_capped_size(size) << BLK_STAT_SIZE_SHIFT);
                             ^~

vim +243 block/blk-stat.c

   227			queue_for_each_hw_ctx(q, hctx, i) {
   228				hctx_for_each_ctx(hctx, ctx, j) {
   229					blk_stat_init(&ctx->stat[BLK_STAT_READ]);
   230					blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
   231				}
   232			}
   233		} else {
   234			blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
   235			blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
   236		}
   237	}
   238	
   239	void blk_stat_set_issue(struct blk_issue_stat *stat, sector_t size)
   240	{
   241		stat->stat = (stat->stat & BLK_STAT_RES_MASK) |
   242			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK) |
 > 243			(blk_capped_size(size) << BLK_STAT_SIZE_SHIFT);
   244	}
   245	
   246	/*
   247	 * Enable stat tracking, return whether it was enabled
   248	 */
   249	bool blk_stat_enable(struct request_queue *q)
   250	{
   251		if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
Tejun Heo Jan. 9, 2017, 9:17 p.m. UTC | #2
On Thu, Dec 15, 2016 at 12:33:06PM -0800, Shaohua Li wrote:
> Currently there is no way to know the request size when the request is
> finished. The next patch will need this info, so add it to blk_issue_stat.
> With this, we will have 49 bits to track time, which is still a very long time.

Not necessarily an objection but do we really need to overload the
size field?  Would a normal extra field hurt too much?

Thanks.
diff mbox

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 61ba08c..485c32d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2459,7 +2459,7 @@  void blk_start_request(struct request *req)
 	blk_dequeue_request(req);
 
 	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
-		blk_stat_set_issue_time(&req->issue_stat);
+		blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
 		req->rq_flags |= RQF_STATS;
 		wbt_issue(req->q->rq_wb, &req->issue_stat);
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4bf850e..891db62 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -474,7 +474,7 @@  void blk_mq_start_request(struct request *rq)
 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
-		blk_stat_set_issue_time(&rq->issue_stat);
+		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
 		rq->rq_flags |= RQF_STATS;
 		wbt_issue(q->rq_wb, &rq->issue_stat);
 	}
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 9b43efb..0469855 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -236,10 +236,11 @@  void blk_stat_clear(struct request_queue *q)
 	}
 }
 
-void blk_stat_set_issue_time(struct blk_issue_stat *stat)
+void blk_stat_set_issue(struct blk_issue_stat *stat, sector_t size)
 {
-	stat->time = (stat->time & BLK_STAT_MASK) |
-			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
+	stat->stat = (stat->stat & BLK_STAT_RES_MASK) |
+		(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK) |
+		(blk_capped_size(size) << BLK_STAT_SIZE_SHIFT);
 }
 
 /*
diff --git a/block/blk-stat.h b/block/blk-stat.h
index a2050a0..462197f 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -8,12 +8,19 @@ 
 #define BLK_STAT_NSEC_MASK	~(BLK_STAT_NSEC - 1)
 
 /*
- * Upper 3 bits can be used elsewhere
+ * from upper:
+ * 3 bits: reserved for other usage
+ * 12 bits: size
+ * 49 bits: time
  */
 #define BLK_STAT_RES_BITS	3
-#define BLK_STAT_SHIFT		(64 - BLK_STAT_RES_BITS)
-#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SHIFT) - 1)
-#define BLK_STAT_MASK		~BLK_STAT_TIME_MASK
+#define BLK_STAT_SIZE_BITS	12
+#define BLK_STAT_RES_SHIFT	(64 - BLK_STAT_RES_BITS)
+#define BLK_STAT_SIZE_SHIFT	(BLK_STAT_RES_SHIFT - BLK_STAT_SIZE_BITS)
+#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SIZE_SHIFT) - 1)
+#define BLK_STAT_SIZE_MASK	\
+	(((1ULL << BLK_STAT_SIZE_BITS) - 1) << BLK_STAT_SIZE_SHIFT)
+#define BLK_STAT_RES_MASK	(~((1ULL << BLK_STAT_RES_SHIFT) - 1))
 
 enum {
 	BLK_STAT_READ	= 0,
@@ -26,7 +33,7 @@  void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
 void blk_stat_clear(struct request_queue *);
 void blk_stat_init(struct blk_rq_stat *);
 bool blk_stat_is_current(struct blk_rq_stat *);
-void blk_stat_set_issue_time(struct blk_issue_stat *);
+void blk_stat_set_issue(struct blk_issue_stat *stat, sector_t size);
 bool blk_stat_enable(struct request_queue *);
 
 static inline u64 __blk_stat_time(u64 time)
@@ -36,7 +43,17 @@  static inline u64 __blk_stat_time(u64 time)
 
 static inline u64 blk_stat_time(struct blk_issue_stat *stat)
 {
-	return __blk_stat_time(stat->time);
+	return __blk_stat_time(stat->stat);
+}
+
+static inline sector_t blk_capped_size(sector_t size)
+{
+	return size & ((1ULL << BLK_STAT_SIZE_BITS) - 1);
+}
+
+static inline sector_t blk_stat_size(struct blk_issue_stat *stat)
+{
+	return (stat->stat & BLK_STAT_SIZE_MASK) >> BLK_STAT_SIZE_SHIFT;
 }
 
 #endif
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 65f1de5..7265f1f 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -32,27 +32,27 @@  enum {
 
 static inline void wbt_clear_state(struct blk_issue_stat *stat)
 {
-	stat->time &= BLK_STAT_TIME_MASK;
+	stat->stat &= ~BLK_STAT_RES_MASK;
 }
 
 static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
 {
-	return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
+	return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
 }
 
 static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
 {
-	stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
+	stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
 }
 
 static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
 {
-	return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
+	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
 }
 
 static inline bool wbt_is_read(struct blk_issue_stat *stat)
 {
-	return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
+	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
 }
 
 struct rq_wait {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 01019a7..8159a6c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -257,7 +257,7 @@  static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
 }
 
 struct blk_issue_stat {
-	u64 time;
+	u64 stat;
 };
 
 #define BLK_RQ_STAT_BATCH	64