diff mbox series

mm: thp: update split_queue_len correctly

Message ID 20211123174658.1728753-1-shakeelb@google.com (mailing list archive)
State New
Headers show
Series mm: thp: update split_queue_len correctly | expand

Commit Message

Shakeel Butt Nov. 23, 2021, 5:46 p.m. UTC
The deferred THPs are split on memory pressure through shrinker
callback and splitting of THP during reclaim can fail for several
reasons like unable to lock the THP, under writeback or unexpected
number of pins on the THP. Such pages are put back on the deferred split
list for consideration later. However, the kernel does not update the
deferred queue size when putting back the pages whose split has failed.
This patch fixes that.

Fixes: 364c1eebe453 ("mm: thp: extract split_queue_* into a struct")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
---
 mm/huge_memory.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

Comments

Shakeel Butt Nov. 23, 2021, 5:59 p.m. UTC | #1
On Tue, Nov 23, 2021 at 9:47 AM Shakeel Butt <shakeelb@google.com> wrote:
>
> The deferred THPs are split on memory pressure through shrinker
> callback and splitting of THP during reclaim can fail for several
> reasons like unable to lock the THP, under writeback or unexpected
> number of pins on the THP. Such pages are put back on the deferred split
> list for consideration later. However kernel does not update the
> deferred queue size on putting back the pages whose split was failed.
> This patch fixes that.

I forgot to add the user visible impact.

"Without this patch, split_queue_len can underflow. The shrinker will
always report that there are THPs to split even when there are none,
wasting CPU time scanning the empty list."
Yang Shi Nov. 23, 2021, 6:51 p.m. UTC | #2
On Tue, Nov 23, 2021 at 9:47 AM Shakeel Butt <shakeelb@google.com> wrote:
>
> The deferred THPs are split on memory pressure through shrinker
> callback and splitting of THP during reclaim can fail for several
> reasons like unable to lock the THP, under writeback or unexpected
> number of pins on the THP. Such pages are put back on the deferred split
> list for consideration later. However kernel does not update the
> deferred queue size on putting back the pages whose split was failed.
> This patch fixes that.
>
> Fixes: 364c1eebe453 ("mm: thp: extract split_queue_* into a struct")
> Signed-off-by: Shakeel Butt <shakeelb@google.com>
> ---
>  mm/huge_memory.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index e5483347291c..4fff9584815b 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2809,7 +2809,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
>         unsigned long flags;
>         LIST_HEAD(list), *pos, *next;
>         struct page *page;
> -       int split = 0;
> +       unsigned long split = 0, num = 0;
>
>  #ifdef CONFIG_MEMCG
>         if (sc->memcg)
> @@ -2823,6 +2823,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
>                 page = compound_head(page);
>                 if (get_page_unless_zero(page)) {
>                         list_move(page_deferred_list(page), &list);
> +                       num++;

Thanks for catching this. But I don't think "num" is needed, isn't the
below code good enough?

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..1fbd8299db0c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2847,6 +2847,7 @@ static unsigned long deferred_split_scan(struct
shrinker *shrink,

        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        list_splice_tail(&list, &ds_queue->split_queue);
+       ds_queue->split_queue_len -= split;
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

        /*

>                 } else {
>                         /* We lost race with put_compound_page() */
>                         list_del_init(page_deferred_list(page));
> @@ -2847,6 +2848,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
>
>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
>         list_splice_tail(&list, &ds_queue->split_queue);
> +       ds_queue->split_queue_len += (num - split);
>         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
>
>         /*
> --
> 2.34.0.rc2.393.gf8c9666880-goog
>
Shakeel Butt Nov. 23, 2021, 7:03 p.m. UTC | #3
On Tue, Nov 23, 2021 at 10:51 AM Yang Shi <shy828301@gmail.com> wrote:
>
> On Tue, Nov 23, 2021 at 9:47 AM Shakeel Butt <shakeelb@google.com> wrote:
> >
> > The deferred THPs are split on memory pressure through shrinker
> > callback and splitting of THP during reclaim can fail for several
> > reasons like unable to lock the THP, under writeback or unexpected
> > number of pins on the THP. Such pages are put back on the deferred split
> > list for consideration later. However kernel does not update the
> > deferred queue size on putting back the pages whose split was failed.
> > This patch fixes that.
> >
> > Fixes: 364c1eebe453 ("mm: thp: extract split_queue_* into a struct")
> > Signed-off-by: Shakeel Butt <shakeelb@google.com>
> > ---
> >  mm/huge_memory.c | 4 +++-
> >  1 file changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index e5483347291c..4fff9584815b 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -2809,7 +2809,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
> >         unsigned long flags;
> >         LIST_HEAD(list), *pos, *next;
> >         struct page *page;
> > -       int split = 0;
> > +       unsigned long split = 0, num = 0;
> >
> >  #ifdef CONFIG_MEMCG
> >         if (sc->memcg)
> > @@ -2823,6 +2823,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
> >                 page = compound_head(page);
> >                 if (get_page_unless_zero(page)) {
> >                         list_move(page_deferred_list(page), &list);
> > +                       num++;
>
> Thanks for catching this. But I don't think "num" is needed, isn't the
> below code good enough?

Yes you are right. I will send the next version. I will at least
change the type of split to unsigned long.

>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index e5483347291c..1fbd8299db0c 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2847,6 +2847,7 @@ static unsigned long deferred_split_scan(struct
> shrinker *shrink,
>
>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
>         list_splice_tail(&list, &ds_queue->split_queue);
> +       ds_queue->split_queue_len -= split;
>         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
>
>         /*
>
> >                 } else {
> >                         /* We lost race with put_compound_page() */
> >                         list_del_init(page_deferred_list(page));
> > @@ -2847,6 +2848,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
> >
> >         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
> >         list_splice_tail(&list, &ds_queue->split_queue);
> > +       ds_queue->split_queue_len += (num - split);
> >         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
> >
> >         /*
> > --
> > 2.34.0.rc2.393.gf8c9666880-goog
> >
diff mbox series

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..4fff9584815b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2809,7 +2809,7 @@  static unsigned long deferred_split_scan(struct shrinker *shrink,
 	unsigned long flags;
 	LIST_HEAD(list), *pos, *next;
 	struct page *page;
-	int split = 0;
+	unsigned long split = 0, num = 0;
 
 #ifdef CONFIG_MEMCG
 	if (sc->memcg)
@@ -2823,6 +2823,7 @@  static unsigned long deferred_split_scan(struct shrinker *shrink,
 		page = compound_head(page);
 		if (get_page_unless_zero(page)) {
 			list_move(page_deferred_list(page), &list);
+			num++;
 		} else {
 			/* We lost race with put_compound_page() */
 			list_del_init(page_deferred_list(page));
@@ -2847,6 +2848,7 @@  static unsigned long deferred_split_scan(struct shrinker *shrink,
 
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	list_splice_tail(&list, &ds_queue->split_queue);
+	ds_queue->split_queue_len += (num - split);
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
 	/*