
[RFC] block: set bdi congested when no tag available

Message ID CAGWkznFRps1P21Oggx0y+6B4svs2FFe7LS0W-N9EcpeToMtJ=g@mail.gmail.com (mailing list archive)
State New
Series [RFC] block: set bdi congested when no tag available

Commit Message

Zhaoyang Huang March 9, 2022, 7:40 a.m. UTC
From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

In previous versions, the block layer would set the bdi as congested
when get_request() failed, which could throttle direct reclaim. Bring
that behaviour back under the current blk-mq design.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
 block/blk-mq-tag.c      | 18 +++++++++++++++++-
 include/linux/sbitmap.h |  1 +
 lib/sbitmap.c           | 17 +++++++++++++++++
 3 files changed, 35 insertions(+), 1 deletion(-)
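
For illustration, a minimal userspace model of the hint this patch restores: mark the device congested when no tag can be allocated, and clear the mark once a tag is returned while the pool had been exhausted. This is a sketch with illustrative names only, not kernel API.

#include <stdbool.h>

/* Illustrative stand-in for the tag pool and the per-bdi congested state. */
struct tag_pool {
        unsigned int free_tags;    /* tags still available */
        bool congested;            /* analogous to the bdi congested bits */
};

/* Allocation path, like the BLK_MQ_REQ_NOWAIT failure case in the patch. */
bool pool_get_tag(struct tag_pool *p)
{
        if (p->free_tags == 0) {
                p->congested = true;    /* analogous to set_bdi_congested() */
                return false;
        }
        p->free_tags--;
        return true;
}

/* Free path, like blk_mq_put_tag() when the pool had been exhausted. */
void pool_put_tag(struct tag_pool *p)
{
        if (p->free_tags == 0)
                p->congested = false;   /* analogous to clear_bdi_congested() */
        p->free_tags++;
}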

Comments

Vlastimil Babka March 9, 2022, 11:50 a.m. UTC | #1
On 3/9/22 08:40, Zhaoyang Huang wrote:
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> 
> In previous versions, the block layer would set the bdi as congested
> when get_request() failed, which could throttle direct reclaim. Bring
> that behaviour back under the current blk-mq design.

Hm, I thought Mel removed the direct reclaim throttling based on bdi
congestion in his series [1]. Maybe the block layer has other uses (I have
no idea) for tracking congestion that should be mentioned instead?

[1]
https://lore.kernel.org/all/20211022144651.19914-1-mgorman@techsingularity.net/

> 
> Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> ---
>  block/blk-mq-tag.c      | 18 +++++++++++++++++-
>  include/linux/sbitmap.h |  1 +
>  lib/sbitmap.c           | 17 +++++++++++++++++
>  3 files changed, 35 insertions(+), 1 deletion(-)
> 
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index 995336a..bd1e520 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -11,6 +11,7 @@
> 
>  #include <linux/blk-mq.h>
>  #include <linux/delay.h>
> +#include <linux/backing-dev.h>
>  #include "blk.h"
>  #include "blk-mq.h"
>  #include "blk-mq-sched.h"
> @@ -126,8 +127,11 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
>   if (tag != BLK_MQ_NO_TAG)
>   goto found_tag;
> 
> - if (data->flags & BLK_MQ_REQ_NOWAIT)
> + if (data->flags & BLK_MQ_REQ_NOWAIT) {
> + set_bdi_congested(data->q->disk->bdi,BLK_RW_SYNC);
> + set_bdi_congested(data->q->disk->bdi,BLK_RW_ASYNC);
>   return BLK_MQ_NO_TAG;
> + }
> 
>   ws = bt_wait_ptr(bt, data->hctx);
>   do {
> @@ -193,9 +197,21 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
>   return tag + tag_offset;
>  }
> 
> +static bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
> +{
> + if (!tags)
> + return true;
> +
> + return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
> +}
> +
>  void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
>       unsigned int tag)
>  {
> + if (!blk_mq_has_free_tags(tags)) {
> + clear_bdi_congested(ctx->queue->disk->bdi, BLK_RW_SYNC);
> + clear_bdi_congested(ctx->queue->disk->bdi, BLK_RW_ASYNC);
> + }
>   if (!blk_mq_tag_is_reserved(tags, tag)) {
>   const int real_tag = tag - tags->nr_reserved_tags;
> 
> diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
> index 4a6ff27..30a5553 100644
> --- a/include/linux/sbitmap.h
> +++ b/include/linux/sbitmap.h
> @@ -210,6 +210,7 @@ static inline void sbitmap_free(struct sbitmap *sb)
>   * Return: true if any bit in the bitmap is set, false otherwise.
>   */
>  bool sbitmap_any_bit_set(const struct sbitmap *sb);
> +bool sbitmap_any_bit_clear(const struct sbitmap *sb);
> 
>  #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
>  #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
> diff --git a/lib/sbitmap.c b/lib/sbitmap.c
> index 2709ab8..baa30d8 100644
> --- a/lib/sbitmap.c
> +++ b/lib/sbitmap.c
> @@ -309,6 +309,23 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
>  }
>  EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
> 
> +bool sbitmap_any_bit_clear(const struct sbitmap *sb)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < sb->map_nr; i++) {
> + const struct sbitmap_word *word = &sb->map[i];
> + unsigned long mask = word->word & ~word->cleared;
> + unsigned long ret;
> +
> + ret = find_first_zero_bit(&mask, word->depth);
> + if (ret < word->depth)
> + return true;
> + }
> + return false;
> +}
> +EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
> +
>  static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
>  {
>   unsigned int i, weight = 0;
Zhaoyang Huang June 15, 2022, 8:58 a.m. UTC | #2
On Wed, Mar 9, 2022 at 7:50 PM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 3/9/22 08:40, Zhaoyang Huang wrote:
> > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> >
> > In previous versions, the block layer would set the bdi as congested
> > when get_request() failed, which could throttle direct reclaim. Bring
> > that behaviour back under the current blk-mq design.
>
> Hm, I thought Mel removed the direct reclaim throttling based on bdi
> congestion in his series [1]. Maybe the block layer has other uses (I have
> no idea) for tracking congestion that should be mentioned instead?
>
> [1]
> https://lore.kernel.org/all/20211022144651.19914-1-mgorman@techsingularity.net/
>
As I understand it, the series above changes the behaviour from sleeping
on a congested bdi for a fixed interval to being woken by the completion
of a request or bio. This patch does not conflict with that one; it adds
one more scenario in which the bdi is set as congested.
> >
> > Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> > ---
> >  block/blk-mq-tag.c      | 18 +++++++++++++++++-
> >  include/linux/sbitmap.h |  1 +
> >  lib/sbitmap.c           | 17 +++++++++++++++++
> >  3 files changed, 35 insertions(+), 1 deletion(-)
> >
> > diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> > index 995336a..bd1e520 100644
> > --- a/block/blk-mq-tag.c
> > +++ b/block/blk-mq-tag.c
> > @@ -11,6 +11,7 @@
> >
> >  #include <linux/blk-mq.h>
> >  #include <linux/delay.h>
> > +#include <linux/backing-dev.h>
> >  #include "blk.h"
> >  #include "blk-mq.h"
> >  #include "blk-mq-sched.h"
> > @@ -126,8 +127,11 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
> >   if (tag != BLK_MQ_NO_TAG)
> >   goto found_tag;
> >
> > - if (data->flags & BLK_MQ_REQ_NOWAIT)
> > + if (data->flags & BLK_MQ_REQ_NOWAIT) {
> > + set_bdi_congested(data->q->disk->bdi,BLK_RW_SYNC);
> > + set_bdi_congested(data->q->disk->bdi,BLK_RW_ASYNC);
> >   return BLK_MQ_NO_TAG;
> > + }
> >
> >   ws = bt_wait_ptr(bt, data->hctx);
> >   do {
> > @@ -193,9 +197,21 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
> >   return tag + tag_offset;
> >  }
> >
> > +static bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
> > +{
> > + if (!tags)
> > + return true;
> > +
> > + return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
> > +}
> > +
> >  void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
> >       unsigned int tag)
> >  {
> > + if (!blk_mq_has_free_tags(tags)) {
> > + clear_bdi_congested(ctx->queue->disk->bdi, BLK_RW_SYNC);
> > + clear_bdi_congested(ctx->queue->disk->bdi, BLK_RW_ASYNC);
> > + }
> >   if (!blk_mq_tag_is_reserved(tags, tag)) {
> >   const int real_tag = tag - tags->nr_reserved_tags;
> >
> > diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
> > index 4a6ff27..30a5553 100644
> > --- a/include/linux/sbitmap.h
> > +++ b/include/linux/sbitmap.h
> > @@ -210,6 +210,7 @@ static inline void sbitmap_free(struct sbitmap *sb)
> >   * Return: true if any bit in the bitmap is set, false otherwise.
> >   */
> >  bool sbitmap_any_bit_set(const struct sbitmap *sb);
> > +bool sbitmap_any_bit_clear(const struct sbitmap *sb);
> >
> >  #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
> >  #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
> > diff --git a/lib/sbitmap.c b/lib/sbitmap.c
> > index 2709ab8..baa30d8 100644
> > --- a/lib/sbitmap.c
> > +++ b/lib/sbitmap.c
> > @@ -309,6 +309,23 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
> >  }
> >  EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
> >
> > +bool sbitmap_any_bit_clear(const struct sbitmap *sb)
> > +{
> > + unsigned int i;
> > +
> > + for (i = 0; i < sb->map_nr; i++) {
> > + const struct sbitmap_word *word = &sb->map[i];
> > + unsigned long mask = word->word & ~word->cleared;
> > + unsigned long ret;
> > +
> > + ret = find_first_zero_bit(&mask, word->depth);
> > + if (ret < word->depth)
> > + return true;
> > + }
> > + return false;
> > +}
> > +EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
> > +
> >  static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
> >  {
> >   unsigned int i, weight = 0;
>
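
For reference, a minimal, self-contained model of the wake-on-completion throttling described in the reply above, as opposed to sleeping on a congested bdi for a fixed interval. All names are illustrative; this is not the mm/vmscan.c implementation.

#include <pthread.h>

/* Illustrative throttle state; not the kernel's per-node mechanism. */
struct io_throttle {
        pthread_mutex_t lock;
        pthread_cond_t  wake;
        unsigned long   completed;    /* I/O completions seen so far */
};

#define IO_THROTTLE_INIT { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

/* Reclaimer side: block until @target further I/O completions occur. */
void throttle_until_progress(struct io_throttle *t, unsigned long target)
{
        unsigned long start;

        pthread_mutex_lock(&t->lock);
        start = t->completed;
        while (t->completed - start < target)
                pthread_cond_wait(&t->wake, &t->lock);
        pthread_mutex_unlock(&t->lock);
}

/* Completion side: called when a request or bio finishes. */
void note_io_completion(struct io_throttle *t)
{
        pthread_mutex_lock(&t->lock);
        t->completed++;
        pthread_cond_broadcast(&t->wake);
        pthread_mutex_unlock(&t->lock);
}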

Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 995336a..bd1e520 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -11,6 +11,7 @@ 

 #include <linux/blk-mq.h>
 #include <linux/delay.h>
+#include <linux/backing-dev.h>
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-sched.h"
@@ -126,8 +127,11 @@  unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
  if (tag != BLK_MQ_NO_TAG)
  goto found_tag;

- if (data->flags & BLK_MQ_REQ_NOWAIT)
+ if (data->flags & BLK_MQ_REQ_NOWAIT) {
+ set_bdi_congested(data->q->disk->bdi,BLK_RW_SYNC);
+ set_bdi_congested(data->q->disk->bdi,BLK_RW_ASYNC);
  return BLK_MQ_NO_TAG;
+ }

  ws = bt_wait_ptr(bt, data->hctx);
  do {
@@ -193,9 +197,21 @@  unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
  return tag + tag_offset;
 }

+static bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
+{
+ if (!tags)
+ return true;
+
+ return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
+}
+
 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
      unsigned int tag)
 {
+ if (!blk_mq_has_free_tags(tags)) {
+ clear_bdi_congested(ctx->queue->disk->bdi, BLK_RW_SYNC);
+ clear_bdi_congested(ctx->queue->disk->bdi, BLK_RW_ASYNC);
+ }
  if (!blk_mq_tag_is_reserved(tags, tag)) {
  const int real_tag = tag - tags->nr_reserved_tags;

diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 4a6ff27..30a5553 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -210,6 +210,7 @@  static inline void sbitmap_free(struct sbitmap *sb)
  * Return: true if any bit in the bitmap is set, false otherwise.
  */
 bool sbitmap_any_bit_set(const struct sbitmap *sb);
+bool sbitmap_any_bit_clear(const struct sbitmap *sb);

 #define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
 #define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2709ab8..baa30d8 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -309,6 +309,23 @@  bool sbitmap_any_bit_set(const struct sbitmap *sb)
 }
 EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

+bool sbitmap_any_bit_clear(const struct sbitmap *sb)
+{
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ const struct sbitmap_word *word = &sb->map[i];
+ unsigned long mask = word->word & ~word->cleared;
+ unsigned long ret;
+
+ ret = find_first_zero_bit(&mask, word->depth);
+ if (ret < word->depth)
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
+
 static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
 {
  unsigned int i, weight = 0;