[v3,for-5.18/block] block: don't merge across cgroup boundaries if blkcg is enabled

Message ID: Yi/eE/6zFNyWJ+qd@slm.duckdns.org
State: New, archived

Commit Message

Tejun Heo March 15, 2022, 12:30 a.m. UTC
blk-iocost and iolatency are cgroup-aware rq-qos policies, but they didn't
disable merges across different cgroups. This obviously can lead to
accounting and control errors, but more importantly to priority inversions:
e.g. an IO which belongs to a higher priority cgroup or IO class may end up
getting throttled incorrectly because it gets merged with an IO issued from
a low priority cgroup.

Fix it by adding blk_cgroup_mergeable(), which is called from the merge paths
and rejects cross-cgroup and cross-issue_as_root merges.
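
For context, bio_issue_as_root_blkg() reports whether a bio is charged to the
root cgroup rather than to its issuer's cgroup; in mainline it keys off the
REQ_META and REQ_SWAP flags. A paraphrased sketch of that helper from the
kernel's blk-cgroup header, reproduced here for reference only (it is not
part of this patch):

	/*
	 * Metadata and swap IO are issued on behalf of the root cgroup so
	 * that rq-qos policies never throttle them. Merging such a bio
	 * with a regular one would silently drop that exemption, hence
	 * the issue_as_root check in blk_cgroup_mergeable().
	 */
	static inline bool bio_issue_as_root_blkg(struct bio *bio)
	{
		return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
	}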

Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: d70675121546 ("block: introduce blk-iolatency io controller")
Cc: stable@vger.kernel.org # v4.19+
Cc: Josef Bacik <jbacik@fb.com>
---
v2 was tagged for-5.18/block but was still based on block-for-5.17. Refreshed
on top of for-5.18/block. My apologies for all the messiness.

v3: Actually rebased on top of block/for-5.18.

v2: Dropped conditional enabling. Always disallow cross-blkcg merges for
    simplicity. While this may spuriously prevent some merges in cases
    where blkcg is enabled but no control is applied, that is a small cross
    section.

 block/blk-cgroup.h | 17 +++++++++++++++++
 block/blk-merge.c  | 11 +++++++++++
 2 files changed, 28 insertions(+)

Comments

Jens Axboe March 15, 2022, 1:15 a.m. UTC | #1
On Mon, 14 Mar 2022 14:30:11 -1000, Tejun Heo wrote:
> blk-iocost and iolatency are cgroup-aware rq-qos policies, but they didn't
> disable merges across different cgroups. This obviously can lead to
> accounting and control errors, but more importantly to priority inversions:
> e.g. an IO which belongs to a higher priority cgroup or IO class may end up
> getting throttled incorrectly because it gets merged with an IO issued from
> a low priority cgroup.
> 
> [...]

Applied, thanks!

[1/1] block: don't merge across cgroup boundaries if blkcg is enabled
      commit: 6b2b04590b51aa4cf395fcd185ce439cab5961dc

Best regards,
Jens Axboe

Patch

diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 3e91803c4a555..47e1e38390c96 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -15,6 +15,7 @@ 
  */
 
 #include <linux/blk-cgroup.h>
+#include <linux/blk-mq.h>
 
 /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
 #define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)
@@ -428,6 +429,21 @@  static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
 }
 
+/**
+ * blk_cgroup_mergeable - Determine whether to allow or disallow merges
+ * @rq: request to merge into
+ * @bio: bio to merge
+ *
+ * @bio and @rq should belong to the same cgroup and their issue_as_root should
+ * match. The latter is necessary as we don't want to throttle e.g. a metadata
+ * update because it happens to be next to a regular IO.
+ */
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
+{
+	return rq->bio->bi_blkg == bio->bi_blkg &&
+		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
+}
+
 void blk_cgroup_bio_start(struct bio *bio);
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 #else	/* CONFIG_BLK_CGROUP */
@@ -467,6 +483,7 @@  static inline void blkg_put(struct blkcg_gq *blkg) { }
 static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
 static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline void blk_cgroup_bio_start(struct bio *bio) { }
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
 
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8d8177f71ebdb..ea6968313b4a8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,6 +9,7 @@ 
 #include <linux/blk-integrity.h>
 #include <linux/scatterlist.h>
 #include <linux/part_stat.h>
+#include <linux/blk-cgroup.h>
 
 #include <trace/events/block.h>
 
@@ -598,6 +599,9 @@  static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
 {
+	if (!blk_cgroup_mergeable(req, bio))
+		goto no_merge;
+
 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 		goto no_merge;
 
@@ -694,6 +698,9 @@  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > blk_rq_get_max_segments(req))
 		return 0;
 
+	if (!blk_cgroup_mergeable(req, next->bio))
+		return 0;
+
 	if (blk_integrity_merge_rq(q, req, next) == false)
 		return 0;
 
@@ -902,6 +909,10 @@  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (bio_data_dir(bio) != rq_data_dir(rq))
 		return false;
 
+	/* don't merge across cgroup boundaries */
+	if (!blk_cgroup_mergeable(rq, bio))
+		return false;
+
 	/* only merge integrity protected bio into ditto rq */
 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 		return false;
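
For illustration, here is a user-space sketch that models the new predicate
with toy stand-in types (struct request, struct bio, and the blkg pointer
below are simplified mock-ups, not the kernel definitions) to show how the
two conditions gate each of the merge paths touched above:

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for the kernel types; illustration only. */
	struct blkcg_gq { int id; };
	struct bio { struct blkcg_gq *bi_blkg; bool issue_as_root; };
	struct request { struct bio *bio; };

	/* Mirrors the logic of the patch: allow a merge only when both
	 * IOs belong to the same cgroup and agree on issue_as_root. */
	static bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
	{
		return rq->bio->bi_blkg == bio->bi_blkg &&
		       rq->bio->issue_as_root == bio->issue_as_root;
	}

	int main(void)
	{
		struct blkcg_gq cg_a = { .id = 1 }, cg_b = { .id = 2 };
		struct bio b1 = { &cg_a, false };
		struct bio b2 = { &cg_b, false };	/* different cgroup */
		struct bio b3 = { &cg_a, true };	/* e.g. metadata IO */
		struct request rq = { .bio = &b1 };

		printf("same cgroup, same mode: %d\n", blk_cgroup_mergeable(&rq, &b1));	/* 1 */
		printf("cross-cgroup merge:     %d\n", blk_cgroup_mergeable(&rq, &b2));	/* 0 */
		printf("issue_as_root mismatch: %d\n", blk_cgroup_mergeable(&rq, &b3));	/* 0 */
		return 0;
	}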