
[5/5] mlock: fix unevictable_pgs event counts on THP

Message ID: alpine.LSU.2.11.2008301408230.5954@eggly.anvils (mailing list archive)
State: New, archived
Series: mm: fixes to past from future testing

Commit Message

Hugh Dickins Aug. 30, 2020, 9:09 p.m. UTC
5.8 commit 5d91f31faf8e ("mm: swap: fix vmstats for huge page") has
established that vm_events should count every subpage of a THP,
including unevictable_pgs_culled and unevictable_pgs_rescued; but
lru_cache_add_inactive_or_unevictable() was not doing so for
unevictable_pgs_mlocked, and mm/mlock.c was not doing so for
unevictable_pgs mlocked, munlocked, cleared and stranded.

Fix them; but THPs don't go the pagevec way in mlock.c,
so no fixes needed on that path.

Fixes: 5d91f31faf8e ("mm: swap: fix vmstats for huge page")
Signed-off-by: Hugh Dickins <hughd@google.com>
---
I've only checked UNEVICTABLEs: there may be more inconsistencies left.
The check_move_unevictable_pages() patch brought me to this one, but
this is more important because mlock works on all THPs, without needing
special testing "force".  But, it's still just monotonically increasing
event counts, so not all that important.

 mm/mlock.c |   24 +++++++++++++++---------
 mm/swap.c  |    6 +++---
 2 files changed, 18 insertions(+), 12 deletions(-)
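
For reference, the counting pattern the fix applies throughout both files, as a
minimal sketch (the helper name below is hypothetical, not part of the patch):

	/* Hypothetical helper, illustrating the fixed accounting pattern */
	static void account_mlocked_page(struct page *page)
	{
		/* 1 for a base page, HPAGE_PMD_NR (e.g. 512) for a PMD-mapped THP */
		int nr_pages = thp_nr_pages(page);

		/* NR_MLOCK was already adjusted per subpage before this patch */
		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);

		/* was count_vm_event(UNEVICTABLE_PGMLOCKED): +1 whatever the size */
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}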

Comments

Shakeel Butt Aug. 31, 2020, 2:45 p.m. UTC | #1
On Sun, Aug 30, 2020 at 2:09 PM Hugh Dickins <hughd@google.com> wrote:
>
> 5.8 commit 5d91f31faf8e ("mm: swap: fix vmstats for huge page") has
> established that vm_events should count every subpage of a THP,
> including unevictable_pgs_culled and unevictable_pgs_rescued; but
> lru_cache_add_inactive_or_unevictable() was not doing so for
> unevictable_pgs_mlocked, and mm/mlock.c was not doing so for
> unevictable_pgs mlocked, munlocked, cleared and stranded.
>
> Fix them; but THPs don't go the pagevec way in mlock.c,
> so no fixes needed on that path.
>
> Fixes: 5d91f31faf8e ("mm: swap: fix vmstats for huge page")
> Signed-off-by: Hugh Dickins <hughd@google.com>

Reviewed-by: Shakeel Butt <shakeelb@google.com>
Yang Shi Sept. 1, 2020, 3:41 p.m. UTC | #2
On Sun, Aug 30, 2020 at 2:09 PM Hugh Dickins <hughd@google.com> wrote:
>
> 5.8 commit 5d91f31faf8e ("mm: swap: fix vmstats for huge page") has
> established that vm_events should count every subpage of a THP,
> including unevictable_pgs_culled and unevictable_pgs_rescued; but
> lru_cache_add_inactive_or_unevictable() was not doing so for
> unevictable_pgs_mlocked, and mm/mlock.c was not doing so for
> unevictable_pgs mlocked, munlocked, cleared and stranded.
>
> Fix them; but THPs don't go the pagevec way in mlock.c,
> so no fixes needed on that path.
>
> Fixes: 5d91f31faf8e ("mm: swap: fix vmstats for huge page")
> Signed-off-by: Hugh Dickins <hughd@google.com>

Acked-by: Yang Shi <shy828301@gmail.com>

> ---
> I've only checked UNEVICTABLEs: there may be more inconsistencies left.
> The check_move_unevictable_pages() patch brought me to this one, but
> this is more important because mlock works on all THPs, without needing
> special testing "force".  But, it's still just monotonically increasing
> event counts, so not all that important.
>
>  mm/mlock.c |   24 +++++++++++++++---------
>  mm/swap.c  |    6 +++---
>  2 files changed, 18 insertions(+), 12 deletions(-)
>
> --- 5.9-rc2/mm/mlock.c  2020-08-16 17:32:50.665507048 -0700
> +++ linux/mm/mlock.c    2020-08-28 17:42:07.975278411 -0700
> @@ -58,11 +58,14 @@ EXPORT_SYMBOL(can_do_mlock);
>   */
>  void clear_page_mlock(struct page *page)
>  {
> +       int nr_pages;
> +
>         if (!TestClearPageMlocked(page))
>                 return;
>
> -       mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
> -       count_vm_event(UNEVICTABLE_PGCLEARED);
> +       nr_pages = thp_nr_pages(page);
> +       mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
> +       count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
>         /*
>          * The previous TestClearPageMlocked() corresponds to the smp_mb()
>          * in __pagevec_lru_add_fn().
> @@ -76,7 +79,7 @@ void clear_page_mlock(struct page *page)
>                  * We lost the race. the page already moved to evictable list.
>                  */
>                 if (PageUnevictable(page))
> -                       count_vm_event(UNEVICTABLE_PGSTRANDED);
> +                       count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
>         }
>  }
>
> @@ -93,9 +96,10 @@ void mlock_vma_page(struct page *page)
>         VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
>
>         if (!TestSetPageMlocked(page)) {
> -               mod_zone_page_state(page_zone(page), NR_MLOCK,
> -                                   thp_nr_pages(page));
> -               count_vm_event(UNEVICTABLE_PGMLOCKED);
> +               int nr_pages = thp_nr_pages(page);
> +
> +               mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
> +               count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
>                 if (!isolate_lru_page(page))
>                         putback_lru_page(page);
>         }
> @@ -138,7 +142,7 @@ static void __munlock_isolated_page(stru
>
>         /* Did try_to_unlock() succeed or punt? */
>         if (!PageMlocked(page))
> -               count_vm_event(UNEVICTABLE_PGMUNLOCKED);
> +               count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));
>
>         putback_lru_page(page);
>  }
> @@ -154,10 +158,12 @@ static void __munlock_isolated_page(stru
>   */
>  static void __munlock_isolation_failed(struct page *page)
>  {
> +       int nr_pages = thp_nr_pages(page);
> +
>         if (PageUnevictable(page))
> -               __count_vm_event(UNEVICTABLE_PGSTRANDED);
> +               __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
>         else
> -               __count_vm_event(UNEVICTABLE_PGMUNLOCKED);
> +               __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
>  }
>
>  /**
> --- 5.9-rc2/mm/swap.c   2020-08-16 17:32:50.709507284 -0700
> +++ linux/mm/swap.c     2020-08-28 17:42:07.975278411 -0700
> @@ -494,14 +494,14 @@ void lru_cache_add_inactive_or_unevictab
>
>         unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
>         if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
> +               int nr_pages = thp_nr_pages(page);
>                 /*
>                  * We use the irq-unsafe __mod_zone_page_stat because this
>                  * counter is not modified from interrupt context, and the pte
>                  * lock is held(spinlock), which implies preemption disabled.
>                  */
> -               __mod_zone_page_state(page_zone(page), NR_MLOCK,
> -                                   thp_nr_pages(page));
> -               count_vm_event(UNEVICTABLE_PGMLOCKED);
> +               __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
> +               count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
>         }
>         lru_cache_add(page);
>  }
>

Patch

--- 5.9-rc2/mm/mlock.c	2020-08-16 17:32:50.665507048 -0700
+++ linux/mm/mlock.c	2020-08-28 17:42:07.975278411 -0700
@@ -58,11 +58,14 @@  EXPORT_SYMBOL(can_do_mlock);
  */
 void clear_page_mlock(struct page *page)
 {
+	int nr_pages;
+
 	if (!TestClearPageMlocked(page))
 		return;
 
-	mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
-	count_vm_event(UNEVICTABLE_PGCLEARED);
+	nr_pages = thp_nr_pages(page);
+	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	/*
 	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
 	 * in __pagevec_lru_add_fn().
@@ -76,7 +79,7 @@  void clear_page_mlock(struct page *page)
 		 * We lost the race. the page already moved to evictable list.
 		 */
 		if (PageUnevictable(page))
-			count_vm_event(UNEVICTABLE_PGSTRANDED);
+			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 }
 
@@ -93,9 +96,10 @@  void mlock_vma_page(struct page *page)
 	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 
 	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		int nr_pages = thp_nr_pages(page);
+
+		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
 	}
@@ -138,7 +142,7 @@  static void __munlock_isolated_page(stru
 
 	/* Did try_to_unlock() succeed or punt? */
 	if (!PageMlocked(page))
-		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));
 
 	putback_lru_page(page);
 }
@@ -154,10 +158,12 @@  static void __munlock_isolated_page(stru
  */
 static void __munlock_isolation_failed(struct page *page)
 {
+	int nr_pages = thp_nr_pages(page);
+
 	if (PageUnevictable(page))
-		__count_vm_event(UNEVICTABLE_PGSTRANDED);
+		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	else
-		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
 }
 
 /**
--- 5.9-rc2/mm/swap.c	2020-08-16 17:32:50.709507284 -0700
+++ linux/mm/swap.c	2020-08-28 17:42:07.975278411 -0700
@@ -494,14 +494,14 @@  void lru_cache_add_inactive_or_unevictab
 
 	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+		int nr_pages = thp_nr_pages(page);
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 	lru_cache_add(page);
 }
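
One way to observe the change from userspace, as a rough sketch (not part of the
patch; assumes 4KB base pages, 2MB THPs, transparent hugepage enabled, enough
RLIMIT_MEMLOCK for 2MB, and an otherwise quiet system so nothing else bumps the
counter):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>

	/* Read one counter from /proc/vmstat; -1 if not found. */
	static long vmstat_value(const char *name)
	{
		char line[256];
		long val = -1;
		size_t len = strlen(name);
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return -1;
		while (fgets(line, sizeof(line), f)) {
			if (!strncmp(line, name, len) && line[len] == ' ') {
				val = atol(line + len + 1);
				break;
			}
		}
		fclose(f);
		return val;
	}

	int main(void)
	{
		const size_t huge = 2UL << 20;	/* one 2MB THP */
		long before, after;
		char *raw, *p;

		/* Over-allocate so the region can be aligned to a 2MB boundary */
		raw = mmap(NULL, 2 * huge, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (raw == MAP_FAILED)
			return 1;
		p = (char *)(((uintptr_t)raw + huge - 1) & ~(huge - 1));

		madvise(p, huge, MADV_HUGEPAGE);	/* request THP; not guaranteed */
		memset(p, 1, huge);			/* fault the region in first */

		before = vmstat_value("unevictable_pgs_mlocked");
		if (mlock(p, huge))
			return 1;
		after = vmstat_value("unevictable_pgs_mlocked");

		/* Expect a delta of 512 with this patch (if a THP was used), 1 without */
		printf("unevictable_pgs_mlocked delta: %ld\n", after - before);
		return 0;
	}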