[RFC,6/9] block, bfq: only count group that the bfq_queue belongs to

Message ID 20211127101132.486806-7-yukuai3@huawei.com (mailing list archive)
State New, archived
Series support concurrent sync io for bfq on a special occasion

Commit Message

Yu Kuai Nov. 27, 2021, 10:11 a.m. UTC
Currently, a group is counted into 'num_groups_with_pending_reqs' once
one of its child cgroups is activated, even if the group itself doesn't
have any pending requests.

For example, if sync io is issued in the cgroup /root/c1/c2, then root,
c1 and c2 will all be counted into 'num_groups_with_pending_reqs', which
makes it impossible to handle requests concurrently.

With this patch, a group that doesn't have any pending requests itself
is no longer counted, even if one of its child groups is activated.
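
Illustration (not part of the patch): a minimal userspace model of the
activation walk, under the assumption that for_each_entity() starts at
the bfq_queue's own entity, so depth 1 is the group the queue belongs
to. All names are simplified stand-ins for the real BFQ structures.

#include <stdbool.h>
#include <stdio.h>

struct entity {
	const char *name;
	struct entity *parent;
};

int main(void)
{
	struct entity root = { "root", NULL };
	struct entity c1   = { "c1",   &root };
	struct entity c2   = { "c2",   &c1 };
	struct entity bfqq = { "bfqq", &c2 };	/* the queue's own entity */
	int num_groups_with_pending_reqs = 0;
	int depth = 0;

	/* Walk upwards from the queue, as for_each_entity() does. */
	for (struct entity *e = &bfqq; e; e = e->parent) {
		bool count_group = depth++ == 1;	/* the patch's rule */

		if (count_group)
			num_groups_with_pending_reqs++;
		printf("depth %d: %s%s\n", depth - 1, e->name,
		       count_group ? " (counted)" : "");
	}

	/* c2, c1 and root would all be counted before; now only c2 is. */
	printf("num_groups_with_pending_reqs = %d\n",
	       num_groups_with_pending_reqs);
	return 0;
}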

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/bfq-wf2q.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

Patch

diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 6693765ff3a0..343cfc8b952e 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -950,6 +950,8 @@  static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
  * __bfq_activate_entity - handle activation of entity.
  * @entity: the entity being activated.
  * @non_blocking_wait_rq: true if entity was waiting for a request
+ * @count_group: if the entity represents a group, true if the group
+ * should be counted in 'num_groups_with_pending_reqs'.
  *
  * Called for a 'true' activation, i.e., if entity is not active and
  * one of its children receives a new request.
@@ -959,7 +961,8 @@  static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
  * from its idle tree.
  */
 static void __bfq_activate_entity(struct bfq_entity *entity,
-				  bool non_blocking_wait_rq)
+				  bool non_blocking_wait_rq,
+				  bool count_group)
 {
 	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
 	bool backshifted = false;
@@ -1010,7 +1013,7 @@  static void __bfq_activate_entity(struct bfq_entity *entity,
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 update:
-	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
+	if (count_group && !bfq_entity_to_bfqq(entity)) { /* bfq_group */
 		struct bfq_group *bfqg =
 			container_of(entity, struct bfq_group, entity);
 		struct bfq_data *bfqd = bfqg->bfqd;
@@ -1106,7 +1109,8 @@  static void __bfq_requeue_entity(struct bfq_entity *entity)
 
 static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 					  struct bfq_sched_data *sd,
-					  bool non_blocking_wait_rq)
+					  bool non_blocking_wait_rq,
+					  bool count_group)
 {
 	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
 
@@ -1122,7 +1126,8 @@  static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
 		 * Not in service and not queued on its active tree:
 		 * the activity is idle and this is a true activation.
 		 */
-		__bfq_activate_entity(entity, non_blocking_wait_rq);
+		__bfq_activate_entity(entity, non_blocking_wait_rq,
+				      count_group);
 }
 
 
@@ -1144,10 +1149,12 @@  static void bfq_activate_requeue_entity(struct bfq_entity *entity,
 					bool requeue, bool expiration)
 {
 	struct bfq_sched_data *sd;
+	int depth = 0;
 
 	for_each_entity(entity) {
 		sd = entity->sched_data;
-		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq);
+		__bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq,
+					      depth++ == 1);
 
 		if (sd && !bfq_update_next_in_service(sd, entity, expiration) &&
 		    !requeue)
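
Net effect, under the same reading as the model above: count_group is
true for exactly one entity per activation walk, the depth-1 group that
the bfq_queue belongs to, so ancestor groups such as c1 and root in the
commit-message example no longer enter 'num_groups_with_pending_reqs'.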