
[v3,1/2] mm: rmap: make try_to_unmap() void function

Message ID 20210525162145.3510-1-shy828301@gmail.com (mailing list archive)
State New, archived
Series [v3,1/2] mm: rmap: make try_to_unmap() void function

Commit Message

Yang Shi May 25, 2021, 4:21 p.m. UTC
Currently try_to_unmap() returns a bool value by checking page_mapcount();
however, this may return a false positive since page_mapcount() doesn't
check all subpages of a compound page.  total_mapcount() could be used
instead, but its cost is higher since it traverses all subpages.

Actually, most callers of try_to_unmap() don't care about the return
value at all, so the callers that do just need to check whether the page
is still mapped with page_mapped() when necessary.  And page_mapped()
does bail out early when it finds a mapped subpage.

Suggested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 include/linux/rmap.h |  2 +-
 mm/huge_memory.c     |  4 +---
 mm/memory-failure.c  | 13 ++++++-------
 mm/rmap.c            |  6 +-----
 mm/vmscan.c          |  3 ++-
 5 files changed, 11 insertions(+), 17 deletions(-)
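
To illustrate the false positive described above, here is a minimal,
self-contained sketch of the semantics involved (a toy model with
invented names -- toy_page, looks_unmapped_head_only -- not the
kernel's actual page_mapcount()/page_mapped() implementations):

  #include <stdbool.h>

  /* Toy compound page: a compound (PMD-level) mapcount plus one
   * per-subpage (PTE-level) mapcount.  Purely illustrative. */
  struct toy_page {
          int nr_subpages;
          int compound_mapcount;
          int subpage_mapcount[512];
  };

  /* Roughly the head-only view that "!page_mapcount(page)" tested:
   * subpage mapcounts beyond the head are never consulted, so a THP
   * whose tail pages are still PTE-mapped can look unmapped -- the
   * false positive. */
  static bool looks_unmapped_head_only(const struct toy_page *p)
  {
          return p->compound_mapcount == 0 &&
                 p->subpage_mapcount[0] == 0;
  }

  /* Roughly what page_mapped() checks: it returns true at the first
   * mapped subpage it finds, so the answer is cheap to compute and
   * never falsely "unmapped" in this model. */
  static bool toy_page_mapped(const struct toy_page *p)
  {
          int i;

          if (p->compound_mapcount > 0)
                  return true;
          for (i = 0; i < p->nr_subpages; i++)
                  if (p->subpage_mapcount[i] > 0)
                          return true;
          return false;
  }

total_mapcount() would instead sum every subpage's count before
answering, which is why it is the costlier way to ask the same question.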

Comments

Minchan Kim May 25, 2021, 4:46 p.m. UTC | #1
On Tue, May 25, 2021 at 09:21:44AM -0700, Yang Shi wrote:
> Currently try_to_unmap() returns a bool value by checking page_mapcount();
> however, this may return a false positive since page_mapcount() doesn't
> check all subpages of a compound page.  total_mapcount() could be used
> instead, but its cost is higher since it traverses all subpages.
> 
> Actually, most callers of try_to_unmap() don't care about the return
> value at all, so the callers that do just need to check whether the page
> is still mapped with page_mapped() when necessary.  And page_mapped()
> does bail out early when it finds a mapped subpage.
> 
> Suggested-by: Hugh Dickins <hughd@google.com>
> Signed-off-by: Yang Shi <shy828301@gmail.com>
> ---
>  include/linux/rmap.h |  2 +-
>  mm/huge_memory.c     |  4 +---
>  mm/memory-failure.c  | 13 ++++++-------
>  mm/rmap.c            |  6 +-----
>  mm/vmscan.c          |  3 ++-
>  5 files changed, 11 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index def5c62c93b3..116cb193110a 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
>  int page_referenced(struct page *, int is_locked,
>  			struct mem_cgroup *memcg, unsigned long *vm_flags);
>  
> -bool try_to_unmap(struct page *, enum ttu_flags flags);
> +void try_to_unmap(struct page *, enum ttu_flags flags);
>  
>  /* Avoid racy checks */
>  #define PVMW_SYNC		(1 << 0)
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 19195fca1aee..80fe642d742d 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
>  {
>  	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
>  		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
> -	bool unmap_success;
>  
>  	VM_BUG_ON_PAGE(!PageHead(page), page);
>  
>  	if (PageAnon(page))
>  		ttu_flags |= TTU_SPLIT_FREEZE;
>  
> -	unmap_success = try_to_unmap(page, ttu_flags);
> -	VM_BUG_ON_PAGE(!unmap_success, page);
> +	try_to_unmap(page, ttu_flags);
>  }
>  
>  static void remap_page(struct page *page, unsigned int nr)
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 9dcc9bcea731..6dd53ff34825 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
>  
>  	if (!PageHuge(hpage)) {
> -		unmap_success = try_to_unmap(hpage, ttu);
> +		try_to_unmap(hpage, ttu);
>  	} else {
>  		if (!PageAnon(hpage)) {
>  			/*
> @@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  			 */
>  			mapping = hugetlb_page_mapping_lock_write(hpage);
>  			if (mapping) {
> -				unmap_success = try_to_unmap(hpage,
> -						     ttu|TTU_RMAP_LOCKED);
> +				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
>  				i_mmap_unlock_write(mapping);
> -			} else {
> +			} else
>  				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
> -				unmap_success = false;
> -			}
>  		} else {
> -			unmap_success = try_to_unmap(hpage, ttu);
> +			try_to_unmap(hpage, ttu);
>  		}
>  	}
> +
> +	unmap_success = !page_mapped(hpage);
>  	if (!unmap_success)
>  		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
>  		       pfn, page_mapcount(hpage));
> diff --git a/mm/rmap.c b/mm/rmap.c
> index a35cbbbded0d..728de421e43a 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
>   *
>   * Tries to remove all the page table entries which are mapping this
>   * page, used in the pageout path.  Caller must hold the page lock.
> - *
> - * If unmap is successful, return true. Otherwise, false.
>   */
> -bool try_to_unmap(struct page *page, enum ttu_flags flags)
> +void try_to_unmap(struct page *page, enum ttu_flags flags)
>  {
>  	struct rmap_walk_control rwc = {
>  		.rmap_one = try_to_unmap_one,
> @@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
>  		rmap_walk_locked(page, &rwc);
>  	else
>  		rmap_walk(page, &rwc);
> -
> -	return !page_mapcount(page) ? true : false;

Couldn't we use page_mapped() instead of page_mapcount() here?
A boolean return matching the "try" semantics looks more reasonable
to me than void.
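
That is, keeping the boolean but fixing the check would just be (a
one-line sketch of the alternative being suggested):

  return !page_mapped(page);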

>  }
>  
>  /**
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index f96d62159720..fa5052ace415 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
>  			if (unlikely(PageTransHuge(page)))
>  				flags |= TTU_SPLIT_HUGE_PMD;
>  
> -			if (!try_to_unmap(page, flags)) {
> +			try_to_unmap(page, flags);
> +			if (page_mapped(page)) {
>  				stat->nr_unmap_fail += nr_pages;
>  				if (!was_swapbacked && PageSwapBacked(page))
>  					stat->nr_lazyfree_fail += nr_pages;
> -- 
> 2.26.2
> 
>
Yang Shi May 25, 2021, 5:07 p.m. UTC | #2
On Tue, May 25, 2021 at 9:46 AM Minchan Kim <minchan@kernel.org> wrote:
>
> On Tue, May 25, 2021 at 09:21:44AM -0700, Yang Shi wrote:
> > Currently try_to_unmap() returns a bool value by checking page_mapcount();
> > however, this may return a false positive since page_mapcount() doesn't
> > check all subpages of a compound page.  total_mapcount() could be used
> > instead, but its cost is higher since it traverses all subpages.
> >
> > Actually, most callers of try_to_unmap() don't care about the return
> > value at all, so the callers that do just need to check whether the page
> > is still mapped with page_mapped() when necessary.  And page_mapped()
> > does bail out early when it finds a mapped subpage.
> >
> > Suggested-by: Hugh Dickins <hughd@google.com>
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > ---
> >  include/linux/rmap.h |  2 +-
> >  mm/huge_memory.c     |  4 +---
> >  mm/memory-failure.c  | 13 ++++++-------
> >  mm/rmap.c            |  6 +-----
> >  mm/vmscan.c          |  3 ++-
> >  5 files changed, 11 insertions(+), 17 deletions(-)
> >
> > diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> > index def5c62c93b3..116cb193110a 100644
> > --- a/include/linux/rmap.h
> > +++ b/include/linux/rmap.h
> > @@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
> >  int page_referenced(struct page *, int is_locked,
> >                       struct mem_cgroup *memcg, unsigned long *vm_flags);
> >
> > -bool try_to_unmap(struct page *, enum ttu_flags flags);
> > +void try_to_unmap(struct page *, enum ttu_flags flags);
> >
> >  /* Avoid racy checks */
> >  #define PVMW_SYNC            (1 << 0)
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 19195fca1aee..80fe642d742d 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
> >  {
> >       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
> >               TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
> > -     bool unmap_success;
> >
> >       VM_BUG_ON_PAGE(!PageHead(page), page);
> >
> >       if (PageAnon(page))
> >               ttu_flags |= TTU_SPLIT_FREEZE;
> >
> > -     unmap_success = try_to_unmap(page, ttu_flags);
> > -     VM_BUG_ON_PAGE(!unmap_success, page);
> > +     try_to_unmap(page, ttu_flags);
> >  }
> >
> >  static void remap_page(struct page *page, unsigned int nr)
> > diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> > index 9dcc9bcea731..6dd53ff34825 100644
> > --- a/mm/memory-failure.c
> > +++ b/mm/memory-failure.c
> > @@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> >               collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
> >
> >       if (!PageHuge(hpage)) {
> > -             unmap_success = try_to_unmap(hpage, ttu);
> > +             try_to_unmap(hpage, ttu);
> >       } else {
> >               if (!PageAnon(hpage)) {
> >                       /*
> > @@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> >                        */
> >                       mapping = hugetlb_page_mapping_lock_write(hpage);
> >                       if (mapping) {
> > -                             unmap_success = try_to_unmap(hpage,
> > -                                                  ttu|TTU_RMAP_LOCKED);
> > +                             try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
> >                               i_mmap_unlock_write(mapping);
> > -                     } else {
> > +                     } else
> >                               pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
> > -                             unmap_success = false;
> > -                     }
> >               } else {
> > -                     unmap_success = try_to_unmap(hpage, ttu);
> > +                     try_to_unmap(hpage, ttu);
> >               }
> >       }
> > +
> > +     unmap_success = !page_mapped(hpage);
> >       if (!unmap_success)
> >               pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
> >                      pfn, page_mapcount(hpage));
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index a35cbbbded0d..728de421e43a 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
> >   *
> >   * Tries to remove all the page table entries which are mapping this
> >   * page, used in the pageout path.  Caller must hold the page lock.
> > - *
> > - * If unmap is successful, return true. Otherwise, false.
> >   */
> > -bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > +void try_to_unmap(struct page *page, enum ttu_flags flags)
> >  {
> >       struct rmap_walk_control rwc = {
> >               .rmap_one = try_to_unmap_one,
> > @@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
> >               rmap_walk_locked(page, &rwc);
> >       else
> >               rmap_walk(page, &rwc);
> > -
> > -     return !page_mapcount(page) ? true : false;
>
> Couldn't we use page_mapped() instead of page_mapcount() here?

Yes, of course. Actually this was discussed in the v2 review. Most
(or at least half) of the callers don't check the return value of
try_to_unmap() at all; only hwpoison, vmscan and THP split do. It
sounds suboptimal to have everyone pay the cost, so Hugh's
suggestion made sense to me.

Quoted the discussion below:

> @@ -1777,7 +1779,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
>   else
>   rmap_walk(page, &rwc);
>
> - return !page_mapcount(page) ? true : false;
> + return !total_mapcount(page) ? true : false;

That always made me wince: "return !total_mapcount(page);" surely.

Or slightly better, "return !page_mapped(page);", since at least that
one breaks out as soon as it sees a mapcount.  Though I guess I'm
being silly there, since that case should never occur, so both
total_mapcount() and page_mapped() scan through all pages.

Or better, change try_to_unmap() to void: most callers ignore its
return value anyway, and make their own decisions; the remaining
few could be changed to do the same.  Though again, I may be
being silly, since the expensive THP case is not the common case.


> A boolean return matching the "try" semantics looks more reasonable
> to me than void.
>
> >  }
> >
> >  /**
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index f96d62159720..fa5052ace415 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
> >                       if (unlikely(PageTransHuge(page)))
> >                               flags |= TTU_SPLIT_HUGE_PMD;
> >
> > -                     if (!try_to_unmap(page, flags)) {
> > +                     try_to_unmap(page, flags);
> > +                     if (page_mapped(page)) {
> >                               stat->nr_unmap_fail += nr_pages;
> >                               if (!was_swapbacked && PageSwapBacked(page))
> >                                       stat->nr_lazyfree_fail += nr_pages;
> > --
> > 2.26.2
> >
> >
Minchan Kim May 25, 2021, 5:34 p.m. UTC | #3
On Tue, May 25, 2021 at 10:07:05AM -0700, Yang Shi wrote:
> On Tue, May 25, 2021 at 9:46 AM Minchan Kim <minchan@kernel.org> wrote:
> >
> > On Tue, May 25, 2021 at 09:21:44AM -0700, Yang Shi wrote:
> > > Currently try_to_unmap() returns a bool value by checking page_mapcount();
> > > however, this may return a false positive since page_mapcount() doesn't
> > > check all subpages of a compound page.  total_mapcount() could be used
> > > instead, but its cost is higher since it traverses all subpages.
> > >
> > > Actually, most callers of try_to_unmap() don't care about the return
> > > value at all, so the callers that do just need to check whether the page
> > > is still mapped with page_mapped() when necessary.  And page_mapped()
> > > does bail out early when it finds a mapped subpage.
> > >
> > > Suggested-by: Hugh Dickins <hughd@google.com>
> > > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > > ---
> > >  include/linux/rmap.h |  2 +-
> > >  mm/huge_memory.c     |  4 +---
> > >  mm/memory-failure.c  | 13 ++++++-------
> > >  mm/rmap.c            |  6 +-----
> > >  mm/vmscan.c          |  3 ++-
> > >  5 files changed, 11 insertions(+), 17 deletions(-)
> > >
> > > diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> > > index def5c62c93b3..116cb193110a 100644
> > > --- a/include/linux/rmap.h
> > > +++ b/include/linux/rmap.h
> > > @@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
> > >  int page_referenced(struct page *, int is_locked,
> > >                       struct mem_cgroup *memcg, unsigned long *vm_flags);
> > >
> > > -bool try_to_unmap(struct page *, enum ttu_flags flags);
> > > +void try_to_unmap(struct page *, enum ttu_flags flags);
> > >
> > >  /* Avoid racy checks */
> > >  #define PVMW_SYNC            (1 << 0)
> > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > > index 19195fca1aee..80fe642d742d 100644
> > > --- a/mm/huge_memory.c
> > > +++ b/mm/huge_memory.c
> > > @@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
> > >  {
> > >       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
> > >               TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
> > > -     bool unmap_success;
> > >
> > >       VM_BUG_ON_PAGE(!PageHead(page), page);
> > >
> > >       if (PageAnon(page))
> > >               ttu_flags |= TTU_SPLIT_FREEZE;
> > >
> > > -     unmap_success = try_to_unmap(page, ttu_flags);
> > > -     VM_BUG_ON_PAGE(!unmap_success, page);
> > > +     try_to_unmap(page, ttu_flags);
> > >  }
> > >
> > >  static void remap_page(struct page *page, unsigned int nr)
> > > diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> > > index 9dcc9bcea731..6dd53ff34825 100644
> > > --- a/mm/memory-failure.c
> > > +++ b/mm/memory-failure.c
> > > @@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> > >               collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
> > >
> > >       if (!PageHuge(hpage)) {
> > > -             unmap_success = try_to_unmap(hpage, ttu);
> > > +             try_to_unmap(hpage, ttu);
> > >       } else {
> > >               if (!PageAnon(hpage)) {
> > >                       /*
> > > @@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> > >                        */
> > >                       mapping = hugetlb_page_mapping_lock_write(hpage);
> > >                       if (mapping) {
> > > -                             unmap_success = try_to_unmap(hpage,
> > > -                                                  ttu|TTU_RMAP_LOCKED);
> > > +                             try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
> > >                               i_mmap_unlock_write(mapping);
> > > -                     } else {
> > > +                     } else
> > >                               pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
> > > -                             unmap_success = false;
> > > -                     }
> > >               } else {
> > > -                     unmap_success = try_to_unmap(hpage, ttu);
> > > +                     try_to_unmap(hpage, ttu);
> > >               }
> > >       }
> > > +
> > > +     unmap_success = !page_mapped(hpage);
> > >       if (!unmap_success)
> > >               pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
> > >                      pfn, page_mapcount(hpage));
> > > diff --git a/mm/rmap.c b/mm/rmap.c
> > > index a35cbbbded0d..728de421e43a 100644
> > > --- a/mm/rmap.c
> > > +++ b/mm/rmap.c
> > > @@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
> > >   *
> > >   * Tries to remove all the page table entries which are mapping this
> > >   * page, used in the pageout path.  Caller must hold the page lock.
> > > - *
> > > - * If unmap is successful, return true. Otherwise, false.
> > >   */
> > > -bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > > +void try_to_unmap(struct page *page, enum ttu_flags flags)
> > >  {
> > >       struct rmap_walk_control rwc = {
> > >               .rmap_one = try_to_unmap_one,
> > > @@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > >               rmap_walk_locked(page, &rwc);
> > >       else
> > >               rmap_walk(page, &rwc);
> > > -
> > > -     return !page_mapcount(page) ? true : false;
> >
> > Couldn't we use page_mapped() instead of page_mapcount() here?
> 
> Yes, of course. Actually this was discussed in the v2 review. Most
> (or at least half) of the callers don't check the return value of
> try_to_unmap() at all; only hwpoison, vmscan and THP split do. It
> sounds suboptimal to have everyone pay the cost, so Hugh's
> suggestion made sense to me.

I'm not sure most callers ignore the return value. I am seeing only
do_migrate_range ignore it; other than that, they check for success
with page_mapped() in the end.

With a void return, I feel it's no longer a "try"-semantic function.
If you still want to go with it, I suggest adding a comment on how to
check whether the function succeeded, in the comment place you
removed above.
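
One possible shape for such a note, sketched against the comment block
the patch trims (illustrative wording only, not necessarily what gets
merged):

  /*
   * Tries to remove all the page table entries which are mapping
   * this page, used in the pageout path.  Caller must hold the
   * page lock.
   *
   * It is the caller's responsibility to check if the page is
   * still mapped (e.g. via page_mapped()) when it needs to know
   * whether the unmap succeeded.
   */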

> 
> Quoted the discussion below:
> 
> > @@ -1777,7 +1779,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
> >   else
> >   rmap_walk(page, &rwc);
> >
> > - return !page_mapcount(page) ? true : false;
> > + return !total_mapcount(page) ? true : false;
> 
> That always made me wince: "return !total_mapcount(page);" surely.
> 
> Or slightly better, "return !page_mapped(page);", since at least that
> one breaks out as soon as it sees a mapcount.  Though I guess I'm
> being silly there, since that case should never occur, so both
> total_mapcount() and page_mapped() scan through all pages.
> 
> Or better, change try_to_unmap() to void: most callers ignore its
> return value anyway, and make their own decisions; the remaining
> few could be changed to do the same.  Though again, I may be
> being silly, since the expensive THP case is not the common case.
> 
> 
> > > A boolean return matching the "try" semantics looks more reasonable
> > > to me than void.
> >
> > >  }
> > >
> > >  /**
> > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > index f96d62159720..fa5052ace415 100644
> > > --- a/mm/vmscan.c
> > > +++ b/mm/vmscan.c
> > > @@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
> > >                       if (unlikely(PageTransHuge(page)))
> > >                               flags |= TTU_SPLIT_HUGE_PMD;
> > >
> > > -                     if (!try_to_unmap(page, flags)) {
> > > +                     try_to_unmap(page, flags);
> > > +                     if (page_mapped(page)) {
> > >                               stat->nr_unmap_fail += nr_pages;
> > >                               if (!was_swapbacked && PageSwapBacked(page))
> > >                                       stat->nr_lazyfree_fail += nr_pages;
> > > --
> > > 2.26.2
> > >
> > >
Yang Shi May 25, 2021, 9:04 p.m. UTC | #4
On Tue, May 25, 2021 at 10:34 AM Minchan Kim <minchan@kernel.org> wrote:
>
> On Tue, May 25, 2021 at 10:07:05AM -0700, Yang Shi wrote:
> > On Tue, May 25, 2021 at 9:46 AM Minchan Kim <minchan@kernel.org> wrote:
> > >
> > > On Tue, May 25, 2021 at 09:21:44AM -0700, Yang Shi wrote:
> > > > Currently try_to_unmap() returns a bool value by checking page_mapcount();
> > > > however, this may return a false positive since page_mapcount() doesn't
> > > > check all subpages of a compound page.  total_mapcount() could be used
> > > > instead, but its cost is higher since it traverses all subpages.
> > > >
> > > > Actually, most callers of try_to_unmap() don't care about the return
> > > > value at all, so the callers that do just need to check whether the page
> > > > is still mapped with page_mapped() when necessary.  And page_mapped()
> > > > does bail out early when it finds a mapped subpage.
> > > >
> > > > Suggested-by: Hugh Dickins <hughd@google.com>
> > > > Signed-off-by: Yang Shi <shy828301@gmail.com>
> > > > ---
> > > >  include/linux/rmap.h |  2 +-
> > > >  mm/huge_memory.c     |  4 +---
> > > >  mm/memory-failure.c  | 13 ++++++-------
> > > >  mm/rmap.c            |  6 +-----
> > > >  mm/vmscan.c          |  3 ++-
> > > >  5 files changed, 11 insertions(+), 17 deletions(-)
> > > >
> > > > diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> > > > index def5c62c93b3..116cb193110a 100644
> > > > --- a/include/linux/rmap.h
> > > > +++ b/include/linux/rmap.h
> > > > @@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
> > > >  int page_referenced(struct page *, int is_locked,
> > > >                       struct mem_cgroup *memcg, unsigned long *vm_flags);
> > > >
> > > > -bool try_to_unmap(struct page *, enum ttu_flags flags);
> > > > +void try_to_unmap(struct page *, enum ttu_flags flags);
> > > >
> > > >  /* Avoid racy checks */
> > > >  #define PVMW_SYNC            (1 << 0)
> > > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > > > index 19195fca1aee..80fe642d742d 100644
> > > > --- a/mm/huge_memory.c
> > > > +++ b/mm/huge_memory.c
> > > > @@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
> > > >  {
> > > >       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
> > > >               TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
> > > > -     bool unmap_success;
> > > >
> > > >       VM_BUG_ON_PAGE(!PageHead(page), page);
> > > >
> > > >       if (PageAnon(page))
> > > >               ttu_flags |= TTU_SPLIT_FREEZE;
> > > >
> > > > -     unmap_success = try_to_unmap(page, ttu_flags);
> > > > -     VM_BUG_ON_PAGE(!unmap_success, page);
> > > > +     try_to_unmap(page, ttu_flags);
> > > >  }
> > > >
> > > >  static void remap_page(struct page *page, unsigned int nr)
> > > > diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> > > > index 9dcc9bcea731..6dd53ff34825 100644
> > > > --- a/mm/memory-failure.c
> > > > +++ b/mm/memory-failure.c
> > > > @@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> > > >               collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
> > > >
> > > >       if (!PageHuge(hpage)) {
> > > > -             unmap_success = try_to_unmap(hpage, ttu);
> > > > +             try_to_unmap(hpage, ttu);
> > > >       } else {
> > > >               if (!PageAnon(hpage)) {
> > > >                       /*
> > > > @@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> > > >                        */
> > > >                       mapping = hugetlb_page_mapping_lock_write(hpage);
> > > >                       if (mapping) {
> > > > -                             unmap_success = try_to_unmap(hpage,
> > > > -                                                  ttu|TTU_RMAP_LOCKED);
> > > > +                             try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
> > > >                               i_mmap_unlock_write(mapping);
> > > > -                     } else {
> > > > +                     } else
> > > >                               pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
> > > > -                             unmap_success = false;
> > > > -                     }
> > > >               } else {
> > > > -                     unmap_success = try_to_unmap(hpage, ttu);
> > > > +                     try_to_unmap(hpage, ttu);
> > > >               }
> > > >       }
> > > > +
> > > > +     unmap_success = !page_mapped(hpage);
> > > >       if (!unmap_success)
> > > >               pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
> > > >                      pfn, page_mapcount(hpage));
> > > > diff --git a/mm/rmap.c b/mm/rmap.c
> > > > index a35cbbbded0d..728de421e43a 100644
> > > > --- a/mm/rmap.c
> > > > +++ b/mm/rmap.c
> > > > @@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
> > > >   *
> > > >   * Tries to remove all the page table entries which are mapping this
> > > >   * page, used in the pageout path.  Caller must hold the page lock.
> > > > - *
> > > > - * If unmap is successful, return true. Otherwise, false.
> > > >   */
> > > > -bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > > > +void try_to_unmap(struct page *page, enum ttu_flags flags)
> > > >  {
> > > >       struct rmap_walk_control rwc = {
> > > >               .rmap_one = try_to_unmap_one,
> > > > @@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > > >               rmap_walk_locked(page, &rwc);
> > > >       else
> > > >               rmap_walk(page, &rwc);
> > > > -
> > > > -     return !page_mapcount(page) ? true : false;
> > >
> > > Couldn't we use page_mapped() instead of page_mapcount() here?
> >
> > Yes, of course. Actually this was discussed in the v2 review. Most
> > (or at least half) of the callers don't check the return value of
> > try_to_unmap() at all; only hwpoison, vmscan and THP split do. It
> > sounds suboptimal to have everyone pay the cost, so Hugh's
> > suggestion made sense to me.
>
> I'm not sure most callers ignore the return value. I am seeing only
> do_migrate_range ignore it; other than that, they check for success
> with page_mapped() in the end.

I'd think this falls into the "ignore" category as well, since the code
doesn't check the return value of try_to_unmap() :-). The patch does
convert the return value check into a page_mapped() check right after
try_to_unmap().
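
In other words, the callers that still care follow this pattern (a
condensed sketch mirroring the vmscan hunk quoted below):

  try_to_unmap(page, flags);
  if (page_mapped(page)) {
          /* unmap failed; keep the existing failure handling */
  }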

>
> With a void return, I feel it's no longer a "try"-semantic function.
> If you still want to go with it, I suggest adding a comment on how to
> check whether the function succeeded, in the comment place you
> removed above.

Thanks for the suggestion. Will add some notes about how to do the check.

>
> >
> > Quoted the discussion below:
> >
> > > @@ -1777,7 +1779,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > >   else
> > >   rmap_walk(page, &rwc);
> > >
> > > - return !page_mapcount(page) ? true : false;
> > > + return !total_mapcount(page) ? true : false;
> >
> > That always made me wince: "return !total_mapcount(page);" surely.
> >
> > Or slightly better, "return !page_mapped(page);", since at least that
> > one breaks out as soon as it sees a mapcount.  Though I guess I'm
> > being silly there, since that case should never occur, so both
> > total_mapcount() and page_mapped() scan through all pages.
> >
> > Or better, change try_to_unmap() to void: most callers ignore its
> > return value anyway, and make their own decisions; the remaining
> > few could be changed to do the same.  Though again, I may be
> > being silly, since the expensive THP case is not the common case.
> >
> >
> > > A boolean return matching the "try" semantics looks more reasonable
> > > to me than void.
> > >
> > > >  }
> > > >
> > > >  /**
> > > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > > index f96d62159720..fa5052ace415 100644
> > > > --- a/mm/vmscan.c
> > > > +++ b/mm/vmscan.c
> > > > @@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
> > > >                       if (unlikely(PageTransHuge(page)))
> > > >                               flags |= TTU_SPLIT_HUGE_PMD;
> > > >
> > > > -                     if (!try_to_unmap(page, flags)) {
> > > > +                     try_to_unmap(page, flags);
> > > > +                     if (page_mapped(page)) {
> > > >                               stat->nr_unmap_fail += nr_pages;
> > > >                               if (!was_swapbacked && PageSwapBacked(page))
> > > >                                       stat->nr_lazyfree_fail += nr_pages;
> > > > --
> > > > 2.26.2
> > > >
> > > >
Hugh Dickins May 25, 2021, 9:11 p.m. UTC | #5
On Tue, 25 May 2021, Yang Shi wrote:

> Currently try_to_unmap() returns a bool value by checking page_mapcount();
> however, this may return a false positive since page_mapcount() doesn't
> check all subpages of a compound page.  total_mapcount() could be used
> instead, but its cost is higher since it traverses all subpages.
> 
> Actually, most callers of try_to_unmap() don't care about the return
> value at all, so the callers that do just need to check whether the page
> is still mapped with page_mapped() when necessary.  And page_mapped()
> does bail out early when it finds a mapped subpage.
> 
> Suggested-by: Hugh Dickins <hughd@google.com>
> Signed-off-by: Yang Shi <shy828301@gmail.com>

Thanks for doing this, I like it, you can add
Acked-by: Hugh Dickins <hughd@google.com>

Let's all ignore checkpatch's complaint on "struct page *":
that try_to_unmap() prototype is consistent with others in rmap.h,
and changing them to "struct page *page" adds no information at all.

But a nit in mm/memory-failure.c: unmap_success no longer needs
to be initialized to true on entry to hwpoison_user_mappings().
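
Concretely, since hwpoison_user_mappings() now assigns
"unmap_success = !page_mapped(hpage);" unconditionally before any use,
the declaration can drop its initializer. A sketch (the declaration
site itself is outside the hunks shown):

  bool unmap_success;	/* was: bool unmap_success = true; */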

More seriously, I question the ordering of the two patches:
I think this 1/2 should be 2/2, and the other be 1/2.  The other is
the one which replaces a BUG by a WARN, and will want a Cc to stable;
whereas this one is really just a cleanup - I don't think there's a
serious consequence from the faint possibility of a wrong unmap_success
on THP, in either memory-failure.c or vmscan.c - wrong message, wrong
stats, but anything more than that?  And memory-failure.c on THP is
somewhat a work in progress, IIUC.

Unfortunately, they can't just be swapped but need rediffing.
Or do you disagree that's a better ordering?

As it stands, I imagine my series of THP unmap fixes (most needing Cc
stable) coming on top of your "mm: thp: check page_mapped instead of
page_mapcount for split" (umm, that's a bad title), but before this
cleanup.

Though I'm responding before I've looked through the stable trees:
it's not of overriding importance, but I'll want to try to minimize
the number of patches that need redoing for older stables, which might
affect the ordering.  (And it might end up easiest if I bring your two
patches into my series, then ask Andrew to replace.)

Further complicated by Andrew having brought in the nouveau reorg of
mm/rmap.c (oh, and that touches mm/huge_memory.c unmap_page() too).
I expect I'll have to send Andrew a series to go before all that,
together with fixups to that to sit on top of it.  Sigh.  Well,
we can only blame me for not getting those THP fixes in sooner.

Hugh

> ---
>  include/linux/rmap.h |  2 +-
>  mm/huge_memory.c     |  4 +---
>  mm/memory-failure.c  | 13 ++++++-------
>  mm/rmap.c            |  6 +-----
>  mm/vmscan.c          |  3 ++-
>  5 files changed, 11 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index def5c62c93b3..116cb193110a 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
>  int page_referenced(struct page *, int is_locked,
>  			struct mem_cgroup *memcg, unsigned long *vm_flags);
>  
> -bool try_to_unmap(struct page *, enum ttu_flags flags);
> +void try_to_unmap(struct page *, enum ttu_flags flags);
>  
>  /* Avoid racy checks */
>  #define PVMW_SYNC		(1 << 0)
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 19195fca1aee..80fe642d742d 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
>  {
>  	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
>  		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
> -	bool unmap_success;
>  
>  	VM_BUG_ON_PAGE(!PageHead(page), page);
>  
>  	if (PageAnon(page))
>  		ttu_flags |= TTU_SPLIT_FREEZE;
>  
> -	unmap_success = try_to_unmap(page, ttu_flags);
> -	VM_BUG_ON_PAGE(!unmap_success, page);
> +	try_to_unmap(page, ttu_flags);
>  }
>  
>  static void remap_page(struct page *page, unsigned int nr)
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 9dcc9bcea731..6dd53ff34825 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
>  
>  	if (!PageHuge(hpage)) {
> -		unmap_success = try_to_unmap(hpage, ttu);
> +		try_to_unmap(hpage, ttu);
>  	} else {
>  		if (!PageAnon(hpage)) {
>  			/*
> @@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  			 */
>  			mapping = hugetlb_page_mapping_lock_write(hpage);
>  			if (mapping) {
> -				unmap_success = try_to_unmap(hpage,
> -						     ttu|TTU_RMAP_LOCKED);
> +				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
>  				i_mmap_unlock_write(mapping);
> -			} else {
> +			} else
>  				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
> -				unmap_success = false;
> -			}
>  		} else {
> -			unmap_success = try_to_unmap(hpage, ttu);
> +			try_to_unmap(hpage, ttu);
>  		}
>  	}
> +
> +	unmap_success = !page_mapped(hpage);
>  	if (!unmap_success)
>  		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
>  		       pfn, page_mapcount(hpage));
> diff --git a/mm/rmap.c b/mm/rmap.c
> index a35cbbbded0d..728de421e43a 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
>   *
>   * Tries to remove all the page table entries which are mapping this
>   * page, used in the pageout path.  Caller must hold the page lock.
> - *
> - * If unmap is successful, return true. Otherwise, false.
>   */
> -bool try_to_unmap(struct page *page, enum ttu_flags flags)
> +void try_to_unmap(struct page *page, enum ttu_flags flags)
>  {
>  	struct rmap_walk_control rwc = {
>  		.rmap_one = try_to_unmap_one,
> @@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
>  		rmap_walk_locked(page, &rwc);
>  	else
>  		rmap_walk(page, &rwc);
> -
> -	return !page_mapcount(page) ? true : false;
>  }
>  
>  /**
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index f96d62159720..fa5052ace415 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
>  			if (unlikely(PageTransHuge(page)))
>  				flags |= TTU_SPLIT_HUGE_PMD;
>  
> -			if (!try_to_unmap(page, flags)) {
> +			try_to_unmap(page, flags);
> +			if (page_mapped(page)) {
>  				stat->nr_unmap_fail += nr_pages;
>  				if (!was_swapbacked && PageSwapBacked(page))
>  					stat->nr_lazyfree_fail += nr_pages;
> -- 
> 2.26.2
Yang Shi May 25, 2021, 10:33 p.m. UTC | #6
On Tue, May 25, 2021 at 2:12 PM Hugh Dickins <hughd@google.com> wrote:
>
> On Tue, 25 May 2021, Yang Shi wrote:
>
> > Currently try_to_unmap() returns a bool value by checking page_mapcount();
> > however, this may return a false positive since page_mapcount() doesn't
> > check all subpages of a compound page.  total_mapcount() could be used
> > instead, but its cost is higher since it traverses all subpages.
> >
> > Actually, most callers of try_to_unmap() don't care about the return
> > value at all, so the callers that do just need to check whether the page
> > is still mapped with page_mapped() when necessary.  And page_mapped()
> > does bail out early when it finds a mapped subpage.
> >
> > Suggested-by: Hugh Dickins <hughd@google.com>
> > Signed-off-by: Yang Shi <shy828301@gmail.com>
>
> Thanks for doing this, I like it, you can add
> Acked-by: Hugh Dickins <hughd@google.com>

Thank you.

>
> Let's all ignore checkpatch's complaint on "struct page *":
> that try_to_unmap() prototype is consistent with others in rmap.h,
> and changing them to "struct page *page" adds no information at all.
>
> But a nit in mm/memory-failure.c: unmap_success no longer needs
> to be initialized to true on entry to hwpoison_user_mappings().

Will fix it.

>
> More seriously, I question the ordering of the two patches:
> I think this 1/2 should be 2/2, and the other be 1/2.  The other is
> the one which replaces a BUG by a WARN, which will be wanted Cc stable;
> whereas this one is really just a cleanup - I don't think there's a
> serious consequence from the faint possibility of wrong unmap_success
> on THP, in either memory-failure.c or vmscan.c - wrong message, wrong
> stats, but more than that? And memory-failure.c on THP somewhat a
> work in progress IIUC.
>
> Unfortunately, they can't just be swapped but need rediffing.
> Or do you disagree that's a better ordering?

No, I don't. I will reorder them in the next version; I need to fix
some things anyway.

>
> As it stands, I imagine my series of THP unmap fixes (most needing Cc
> stable) coming on top of your "mm: thp: check page_mapped instead of
> page_mapcount for split" (umm, that's a bad title), but before this
> cleanup.
>
> Though I'm responding before I've looked through the stable trees:
> it's not of overriding importance, but I'll want to try to minimize
> the number of patches that need redoing for older stables, which might
> affect the ordering.  (And it might end up easiest if I bring your two
> patches into my series, then ask Andrew to replace.)
>
> Further complicated by Andrew having brought in the nouveau reorg of
> mm/rmap.c (oh, and that touches mm/huge_memory.c unmap_page() too).
> I expect I'll have to send Andrew a series to go before all that,
> together with fixups to that to sit on top of it.  Sigh.  Well,
> we can only blame me for not getting those THP fixes in sooner.
>
> Hugh
>
> > ---
> >  include/linux/rmap.h |  2 +-
> >  mm/huge_memory.c     |  4 +---
> >  mm/memory-failure.c  | 13 ++++++-------
> >  mm/rmap.c            |  6 +-----
> >  mm/vmscan.c          |  3 ++-
> >  5 files changed, 11 insertions(+), 17 deletions(-)
> >
> > diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> > index def5c62c93b3..116cb193110a 100644
> > --- a/include/linux/rmap.h
> > +++ b/include/linux/rmap.h
> > @@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
> >  int page_referenced(struct page *, int is_locked,
> >                       struct mem_cgroup *memcg, unsigned long *vm_flags);
> >
> > -bool try_to_unmap(struct page *, enum ttu_flags flags);
> > +void try_to_unmap(struct page *, enum ttu_flags flags);
> >
> >  /* Avoid racy checks */
> >  #define PVMW_SYNC            (1 << 0)
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 19195fca1aee..80fe642d742d 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
> >  {
> >       enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
> >               TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
> > -     bool unmap_success;
> >
> >       VM_BUG_ON_PAGE(!PageHead(page), page);
> >
> >       if (PageAnon(page))
> >               ttu_flags |= TTU_SPLIT_FREEZE;
> >
> > -     unmap_success = try_to_unmap(page, ttu_flags);
> > -     VM_BUG_ON_PAGE(!unmap_success, page);
> > +     try_to_unmap(page, ttu_flags);
> >  }
> >
> >  static void remap_page(struct page *page, unsigned int nr)
> > diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> > index 9dcc9bcea731..6dd53ff34825 100644
> > --- a/mm/memory-failure.c
> > +++ b/mm/memory-failure.c
> > @@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> >               collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
> >
> >       if (!PageHuge(hpage)) {
> > -             unmap_success = try_to_unmap(hpage, ttu);
> > +             try_to_unmap(hpage, ttu);
> >       } else {
> >               if (!PageAnon(hpage)) {
> >                       /*
> > @@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
> >                        */
> >                       mapping = hugetlb_page_mapping_lock_write(hpage);
> >                       if (mapping) {
> > -                             unmap_success = try_to_unmap(hpage,
> > -                                                  ttu|TTU_RMAP_LOCKED);
> > +                             try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
> >                               i_mmap_unlock_write(mapping);
> > -                     } else {
> > +                     } else
> >                               pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
> > -                             unmap_success = false;
> > -                     }
> >               } else {
> > -                     unmap_success = try_to_unmap(hpage, ttu);
> > +                     try_to_unmap(hpage, ttu);
> >               }
> >       }
> > +
> > +     unmap_success = !page_mapped(hpage);
> >       if (!unmap_success)
> >               pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
> >                      pfn, page_mapcount(hpage));
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index a35cbbbded0d..728de421e43a 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
> >   *
> >   * Tries to remove all the page table entries which are mapping this
> >   * page, used in the pageout path.  Caller must hold the page lock.
> > - *
> > - * If unmap is successful, return true. Otherwise, false.
> >   */
> > -bool try_to_unmap(struct page *page, enum ttu_flags flags)
> > +void try_to_unmap(struct page *page, enum ttu_flags flags)
> >  {
> >       struct rmap_walk_control rwc = {
> >               .rmap_one = try_to_unmap_one,
> > @@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
> >               rmap_walk_locked(page, &rwc);
> >       else
> >               rmap_walk(page, &rwc);
> > -
> > -     return !page_mapcount(page) ? true : false;
> >  }
> >
> >  /**
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index f96d62159720..fa5052ace415 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
> >                       if (unlikely(PageTransHuge(page)))
> >                               flags |= TTU_SPLIT_HUGE_PMD;
> >
> > -                     if (!try_to_unmap(page, flags)) {
> > +                     try_to_unmap(page, flags);
> > +                     if (page_mapped(page)) {
> >                               stat->nr_unmap_fail += nr_pages;
> >                               if (!was_swapbacked && PageSwapBacked(page))
> >                                       stat->nr_lazyfree_fail += nr_pages;
> > --
> > 2.26.2

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..116cb193110a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -194,7 +194,7 @@  static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
 
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 19195fca1aee..80fe642d742d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2336,15 +2336,13 @@  static void unmap_page(struct page *page)
 {
 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-	bool unmap_success;
 
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	if (PageAnon(page))
 		ttu_flags |= TTU_SPLIT_FREEZE;
 
-	unmap_success = try_to_unmap(page, ttu_flags);
-	VM_BUG_ON_PAGE(!unmap_success, page);
+	try_to_unmap(page, ttu_flags);
 }
 
 static void remap_page(struct page *page, unsigned int nr)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9dcc9bcea731..6dd53ff34825 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1126,7 +1126,7 @@  static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	if (!PageHuge(hpage)) {
-		unmap_success = try_to_unmap(hpage, ttu);
+		try_to_unmap(hpage, ttu);
 	} else {
 		if (!PageAnon(hpage)) {
 			/*
@@ -1138,17 +1138,16 @@  static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
-				unmap_success = try_to_unmap(hpage,
-						     ttu|TTU_RMAP_LOCKED);
+				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
 				i_mmap_unlock_write(mapping);
-			} else {
+			} else
 				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
-				unmap_success = false;
-			}
 		} else {
-			unmap_success = try_to_unmap(hpage, ttu);
+			try_to_unmap(hpage, ttu);
 		}
 	}
+
+	unmap_success = !page_mapped(hpage);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
diff --git a/mm/rmap.c b/mm/rmap.c
index a35cbbbded0d..728de421e43a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1748,10 +1748,8 @@  static int page_not_mapped(struct page *page)
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path.  Caller must hold the page lock.
- *
- * If unmap is successful, return true. Otherwise, false.
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
@@ -1776,8 +1774,6 @@  bool try_to_unmap(struct page *page, enum ttu_flags flags)
 		rmap_walk_locked(page, &rwc);
 	else
 		rmap_walk(page, &rwc);
-
-	return !page_mapcount(page) ? true : false;
 }
 
 /**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f96d62159720..fa5052ace415 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@  static unsigned int shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 
-			if (!try_to_unmap(page, flags)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page)) {
 				stat->nr_unmap_fail += nr_pages;
 				if (!was_swapbacked && PageSwapBacked(page))
 					stat->nr_lazyfree_fail += nr_pages;