[v2,1/3] mm: remove activate_page() from unuse_pte()

Message ID 20200818184704.3625199-1-yuzhao@google.com (mailing list archive)
State New, archived
Series [v2,1/3] mm: remove activate_page() from unuse_pte()

Commit Message

Yu Zhao Aug. 18, 2020, 6:47 p.m. UTC
Since commit b518154e59aa ("mm/vmscan: protect the workingset on
anonymous LRU"), we no longer add anon pages to the active lruvec on
first use. Remove activate_page() from unuse_pte(), which that commit
seems to have missed, and make the function static while we are at it.

Before that commit, lru_cache_add_active_or_unevictable() already added
new KSM pages to the active lruvec, so activate_page() was never
necessary for them in the first place.

Signed-off-by: Yu Zhao <yuzhao@google.com>
---
 include/linux/swap.h | 1 -
 mm/swap.c            | 4 ++--
 mm/swapfile.c        | 5 -----
 3 files changed, 2 insertions(+), 8 deletions(-)
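
For context, here is a condensed sketch of the unuse_pte() swap-in path
once this patch is applied, reconstructed from the hunks below (error
handling and most of the function body are omitted, so this is a sketch
rather than a compilable excerpt):

	static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, swp_entry_t entry, struct page *page)
	{
		...
		/* new anon pages start on the inactive list since b518154e59aa */
		lru_cache_add_inactive_or_unevictable(page, vma);
		...
		swap_free(entry);
		/*
		 * activate_page(page) was called here; with it gone, a
		 * swapped-in page ages up from the inactive list like any
		 * other newly faulted anon page.
		 */
	out:
		pte_unmap_unlock(pte, ptl);
		...
	}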

Comments

Yang Shi Aug. 19, 2020, 10:16 p.m. UTC | #1
On Tue, Aug 18, 2020 at 11:47 AM Yu Zhao <yuzhao@google.com> wrote:
>
> Since commit b518154e59aa ("mm/vmscan: protect the workingset on
> anonymous LRU"), we no longer add anon pages to the active lruvec on
> first use. Remove activate_page() from unuse_pte(), which that commit
> seems to have missed, and make the function static while we are at it.
>
> Before that commit, lru_cache_add_active_or_unevictable() already added
> new KSM pages to the active lruvec, so activate_page() was never
> necessary for them in the first place.

Reviewed-by: Yang Shi <shy828301@gmail.com>

>
> Signed-off-by: Yu Zhao <yuzhao@google.com>
> ---
>  include/linux/swap.h | 1 -
>  mm/swap.c            | 4 ++--
>  mm/swapfile.c        | 5 -----
>  3 files changed, 2 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 661046994db4..df6207346078 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -340,7 +340,6 @@ extern void lru_note_cost_page(struct page *);
>  extern void lru_cache_add(struct page *);
>  extern void lru_add_page_tail(struct page *page, struct page *page_tail,
>                          struct lruvec *lruvec, struct list_head *head);
> -extern void activate_page(struct page *);
>  extern void mark_page_accessed(struct page *);
>  extern void lru_add_drain(void);
>  extern void lru_add_drain_cpu(int cpu);
> diff --git a/mm/swap.c b/mm/swap.c
> index d16d65d9b4e0..25c4043491b3 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -348,7 +348,7 @@ static bool need_activate_page_drain(int cpu)
>         return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
>  }
>
> -void activate_page(struct page *page)
> +static void activate_page(struct page *page)
>  {
>         page = compound_head(page);
>         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
> @@ -368,7 +368,7 @@ static inline void activate_page_drain(int cpu)
>  {
>  }
>
> -void activate_page(struct page *page)
> +static void activate_page(struct page *page)
>  {
>         pg_data_t *pgdat = page_pgdat(page);
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 12f59e641b5e..c287c560f96d 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1925,11 +1925,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
>                 lru_cache_add_inactive_or_unevictable(page, vma);
>         }
>         swap_free(entry);
> -       /*
> -        * Move the page to the active list so it is not
> -        * immediately swapped out again after swapon.
> -        */
> -       activate_page(page);
>  out:
>         pte_unmap_unlock(pte, ptl);
>         if (page != swapcache) {
> --
> 2.28.0.220.ged08abb693-goog
>
>

Patch

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 661046994db4..df6207346078 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -340,7 +340,6 @@ extern void lru_note_cost_page(struct page *);
 extern void lru_cache_add(struct page *);
 extern void lru_add_page_tail(struct page *page, struct page *page_tail,
 			 struct lruvec *lruvec, struct list_head *head);
-extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
diff --git a/mm/swap.c b/mm/swap.c
index d16d65d9b4e0..25c4043491b3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -348,7 +348,7 @@ static bool need_activate_page_drain(int cpu)
 	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
 }
 
-void activate_page(struct page *page)
+static void activate_page(struct page *page)
 {
 	page = compound_head(page);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -368,7 +368,7 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-void activate_page(struct page *page)
+static void activate_page(struct page *page)
 {
 	pg_data_t *pgdat = page_pgdat(page);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 12f59e641b5e..c287c560f96d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1925,11 +1925,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		lru_cache_add_inactive_or_unevictable(page, vma);
 	}
 	swap_free(entry);
-	/*
-	 * Move the page to the active list so it is not
-	 * immediately swapped out again after swapon.
-	 */
-	activate_page(page);
 out:
 	pte_unmap_unlock(pte, ptl);
 	if (page != swapcache) {
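
With activate_page() now static, mm/swap.c is the only place that can
activate a page directly; everyone else goes through the still-exported
mark_page_accessed(). Simplified from the mm/swap.c of the same era (the
pagevec fallback and idle-page handling are omitted), that path looks
roughly like:

	void mark_page_accessed(struct page *page)
	{
		page = compound_head(page);
		if (!PageReferenced(page)) {
			/* first touch: only set the referenced bit */
			SetPageReferenced(page);
		} else if (PageUnevictable(page)) {
			/* unevictable pages are never activated */
		} else if (!PageActive(page)) {
			/* second touch while inactive: promote the page */
			activate_page(page);
			ClearPageReferenced(page);
			workingset_activation(page);
		}
	}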