@@ -948,7 +948,8 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
struct request_queue *top_q;
- bool poll_on;
+ bool orig_poll_on, poll_on;
+ u64 old_nr_migrations;
BUG_ON(bio->bi_next);
@@ -958,6 +959,8 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
top_q = bio->bi_bdev->bd_disk->queue;
poll_on = test_bit(QUEUE_FLAG_POLL, &top_q->queue_flags) &&
(bio->bi_opf & REQ_HIPRI);
+ orig_poll_on = poll_on;
+ old_nr_migrations = READ_ONCE(current->se.nr_migrations);
do {
blk_qc_t cookie;
@@ -987,7 +990,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
ret = cookie;
} else if (ret != cookie) {
/* bio gets split and enqueued to multi hctxs */
- ret = BLK_QC_T_BIO_POLL_ALL;
+ ret = blk_qc_t_get_by_cpu();
poll_on = false;
}
}
@@ -1014,6 +1017,17 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
current->bio_list = NULL;
+ /*
+ * If the bio got split and enqueued into multiple hctxs, the cookie
+ * stores the number of the CPU on which the IO submission happened,
+ * provided the current process has not been migrated to another CPU
+ * in the meantime. Otherwise fall back to BLK_QC_T_BIO_POLL_ALL,
+ * i.e. iterating and polling on all hw queues, since the split bios
+ * may have been submitted from different CPUs.
+ */
+ if (orig_poll_on != poll_on &&
+ old_nr_migrations != READ_ONCE(current->se.nr_migrations))
+ ret = BLK_QC_T_BIO_POLL_ALL;
+
return ret;
}
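
What makes the sub-fastpath safe is the counter comparison rather than a
CPU-id comparison: the scheduler increments current->se.nr_migrations every
time a task is moved to a different CPU, so an unchanged counter proves the
task stayed on one CPU for the entire submission window, whereas comparing
raw_smp_processor_id() before and after would miss a migrate-away-and-back
in the middle. A minimal sketch of the idiom (illustration only, not part
of the patch; stayed_on_one_cpu() is a made-up name):

    #include <linux/sched.h>

    /* An unchanged counter means no migration happened inside the window. */
    static bool stayed_on_one_cpu(u64 snapshot)
    {
            return snapshot == READ_ONCE(current->se.nr_migrations);
    }
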
@@ -555,8 +555,21 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
 *                                               ^
 *                                               reserved for compatibility with mq
*
- * 2. When @bio gets split and enqueued into multi hw queues, the returned
- * cookie is just BLK_QC_T_BIO_POLL_ALL flag.
+ * 2. When @bio gets split and enqueued into multiple hw queues, and the
+ *    current process has *not* been migrated to another CPU, the returned
+ *    cookie stores the number of the CPU on which the IO submission
+ *    happened, with the BLK_QC_T_BIO_POLL_CPU flag set.
+ *
+ *    63                    31                      1 0 (bit)
+ *    +----------------------+---------------------+-+-+
+ *    |         cpu          |                     |1|0|
+ *    +----------------------+---------------------+-+-+
+ *                                                  ^
+ *                                                  BLK_QC_T_BIO_POLL_CPU
+ *
+ * 3. When @bio gets split and enqueued into multiple hw queues, and the
+ *    current process *has* been migrated to another CPU, the returned
+ *    cookie is just the BLK_QC_T_BIO_POLL_ALL flag.
*
 *    63                                            0 (bit)
 *    +---------------------------------------------+-+
 *    |                                             |1|
 *    +---------------------------------------------+-+
 *                                                   ^
 *                                                   BLK_QC_T_BIO_POLL_ALL
*
- * 3. Otherwise, return BLK_QC_T_NONE as the cookie.
+ * 4. Otherwise, return BLK_QC_T_NONE as the cookie.
*
 *    63                                              0 (bit)
 *    +-----------------------------------------------+
 *    |                 BLK_QC_T_NONE                 |
 *    +-----------------------------------------------+
*/
#define BLK_QC_T_HIGH_SHIFT 32
#define BLK_QC_T_BIO_POLL_ALL 1U
+#define BLK_QC_T_BIO_POLL_CPU 2U
static inline unsigned int blk_qc_t_to_devt(blk_qc_t cookie)
{
return cookie >> BLK_QC_T_HIGH_SHIFT;
}
+static inline unsigned int blk_qc_t_to_cpu(blk_qc_t cookie)
+{
+ return cookie >> BLK_QC_T_HIGH_SHIFT;
+}
+
static inline blk_qc_t blk_qc_t_get_by_devt(unsigned int dev,
unsigned int queue_num)
{
@@ -587,9 +606,20 @@ static inline blk_qc_t blk_qc_t_get_by_devt(unsigned int dev,
(queue_num << BLK_QC_T_SHIFT);
}
+static inline blk_qc_t blk_qc_t_get_by_cpu(void)
+{
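+ /*
+ * raw_smp_processor_id() is fine here even though the caller may be
+ * preemptible: a migration during submission is detected separately
+ * via current->se.nr_migrations and demotes the cookie to
+ * BLK_QC_T_BIO_POLL_ALL.
+ */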
+ return ((blk_qc_t)raw_smp_processor_id() << BLK_QC_T_HIGH_SHIFT) |
+ BLK_QC_T_BIO_POLL_CPU;
+}
+
static inline bool blk_qc_t_is_poll_multi(blk_qc_t cookie)
{
- return cookie & BLK_QC_T_BIO_POLL_ALL;
+ return cookie & (BLK_QC_T_BIO_POLL_ALL | BLK_QC_T_BIO_POLL_CPU);
+}
+
+static inline bool blk_qc_t_is_poll_cpu(blk_qc_t cookie)
+{
+ return cookie & BLK_QC_T_BIO_POLL_CPU;
}
struct blk_rq_stat {
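
To make the bit layout concrete, the helpers can be exercised from userspace
in a few lines of C. This is a sketch under one assumption implied by the
63..0 bit diagrams above: blk_qc_t has been widened to 64 bits earlier in
the series (mainline defines it as a 32-bit unsigned int):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t blk_qc_t;              /* assumed 64-bit, see above */
    #define BLK_QC_T_HIGH_SHIFT   32
    #define BLK_QC_T_BIO_POLL_ALL 1U
    #define BLK_QC_T_BIO_POLL_CPU 2U

    static blk_qc_t qc_get_by_cpu(unsigned int cpu)
    {
            return ((blk_qc_t)cpu << BLK_QC_T_HIGH_SHIFT) | BLK_QC_T_BIO_POLL_CPU;
    }

    int main(void)
    {
            blk_qc_t cookie = qc_get_by_cpu(3);

            assert(cookie >> BLK_QC_T_HIGH_SHIFT == 3);  /* high 32 bits: cpu */
            assert(cookie & BLK_QC_T_BIO_POLL_CPU);      /* bit 1 set */
            assert(!(cookie & BLK_QC_T_BIO_POLL_ALL));   /* bit 0 clear */
            printf("cookie=%#llx\n", (unsigned long long)cookie);
            return 0;
    }
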
Offer one sub-fastpath for bio-based polling, for the case when a bio
submitted to a dm device gets split and enqueued into multiple hw queues
while the IO submission process has not been migrated to another CPU.

In this case, the IO submission routine returns the number of the CPU on
which the submission happened as the cookie, and the polling routine
iterates and polls only the hw queues that this CPU maps to, instead of
iterating *all* hw queues in polling mode.

This optimization can dramatically reduce cache ping-pong and thus improve
polling performance when the device has multiple hw queues in polling mode
that can be reserved for multiple polling processes.

The polling routine falls back to iterating all hw queues in polling mode
once the process has been migrated to another CPU during the IO submission
phase.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
 block/blk-core.c          | 18 ++++++++++++++++--
 include/linux/blk_types.h | 38 ++++++++++++++++++++++++++++++++++----
 2 files changed, 50 insertions(+), 6 deletions(-)
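
On the polling side, the cookie is meant to steer which hw queues get
iterated. The sketch below is hypothetical, with bio_poll_dispatch(),
poll_hctxs_of_cpu() and poll_all_hctxs() as stand-ins rather than kernel
APIs, but it shows the one subtlety the helpers impose: since
blk_qc_t_is_poll_multi() matches per-CPU cookies as well, the sub-fastpath
must be tested first:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t blk_qc_t;
    #define BLK_QC_T_NONE         ((blk_qc_t)-1)  /* -1U in mainline */
    #define BLK_QC_T_HIGH_SHIFT   32
    #define BLK_QC_T_BIO_POLL_ALL 1U
    #define BLK_QC_T_BIO_POLL_CPU 2U

    /* Stand-ins for the hctx iteration the real series implements. */
    static int poll_hctxs_of_cpu(unsigned int cpu)
    {
            printf("poll only the hctxs mapped to cpu %u\n", cpu);
            return 1;
    }

    static int poll_all_hctxs(void)
    {
            printf("poll all hctxs\n");
            return 1;
    }

    static int bio_poll_dispatch(blk_qc_t cookie)
    {
            if (cookie == BLK_QC_T_NONE)
                    return 0;
            /* The POLL_CPU test must precede the POLL_ALL/multi test,
             * since the multi check matches both flags. */
            if (cookie & BLK_QC_T_BIO_POLL_CPU)
                    return poll_hctxs_of_cpu((unsigned int)(cookie >> BLK_QC_T_HIGH_SHIFT));
            if (cookie & (BLK_QC_T_BIO_POLL_ALL | BLK_QC_T_BIO_POLL_CPU))
                    return poll_all_hctxs();
            return 0;  /* single-hctx (dev_t/queue_num) case elided */
    }

    int main(void)
    {
            bio_poll_dispatch(((blk_qc_t)3 << BLK_QC_T_HIGH_SHIFT) | BLK_QC_T_BIO_POLL_CPU);
            bio_poll_dispatch((blk_qc_t)BLK_QC_T_BIO_POLL_ALL);
            return 0;
    }
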