[v17,18/21] mm/lru: introduce the relock_page_lruvec function

Message ID 1595681998-19193-19-git-send-email-alex.shi@linux.alibaba.com
State New
Series per memcg lru lock

Commit Message

Alex Shi July 25, 2020, 12:59 p.m. UTC
Use this new function to replace the same code repeated in several places; no functional change.

Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-kernel@vger.kernel.org
Cc: cgroups@vger.kernel.org
Cc: linux-mm@kvack.org
---
 include/linux/memcontrol.h | 40 ++++++++++++++++++++++++++++++++++++++++
 mm/mlock.c                 |  9 +--------
 mm/swap.c                  | 33 +++++++--------------------------
 mm/vmscan.c                |  8 +-------
 4 files changed, 49 insertions(+), 41 deletions(-)

Comments

Alexander Duyck July 29, 2020, 5:52 p.m. UTC | #1
On Sat, Jul 25, 2020 at 6:00 AM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>
> Use this new function to replace the same code repeated in several places; no functional change.
>
> Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Mel Gorman <mgorman@techsingularity.net>
> Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Tejun Heo <tj@kernel.org>
> Cc: linux-kernel@vger.kernel.org
> Cc: cgroups@vger.kernel.org
> Cc: linux-mm@kvack.org
> ---
>  include/linux/memcontrol.h | 40 ++++++++++++++++++++++++++++++++++++++++
>  mm/mlock.c                 |  9 +--------
>  mm/swap.c                  | 33 +++++++--------------------------
>  mm/vmscan.c                |  8 +-------
>  4 files changed, 49 insertions(+), 41 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 258901021c6c..6e670f991b42 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1313,6 +1313,46 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
>         spin_unlock_irqrestore(&lruvec->lru_lock, flags);
>  }
>
> +/* Don't lock again iff page's lruvec locked */
> +static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
> +               struct lruvec *locked_lruvec)
> +{
> +       struct pglist_data *pgdat = page_pgdat(page);
> +       bool locked;
> +
> +       rcu_read_lock();
> +       locked = mem_cgroup_page_lruvec(page, pgdat) == locked_lruvec;
> +       rcu_read_unlock();
> +
> +       if (locked)
> +               return locked_lruvec;
> +
> +       if (locked_lruvec)
> +               unlock_page_lruvec_irq(locked_lruvec);
> +
> +       return lock_page_lruvec_irq(page);
> +}
> +
> +/* Don't lock again iff page's lruvec locked */
> +static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
> +               struct lruvec *locked_lruvec, unsigned long *flags)
> +{
> +       struct pglist_data *pgdat = page_pgdat(page);
> +       bool locked;
> +
> +       rcu_read_lock();
> +       locked = mem_cgroup_page_lruvec(page, pgdat) == locked_lruvec;
> +       rcu_read_unlock();
> +
> +       if (locked)
> +               return locked_lruvec;
> +
> +       if (locked_lruvec)
> +               unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
> +
> +       return lock_page_lruvec_irqsave(page, flags);
> +}
> +

So looking these over they seem to be pretty inefficient for what they
do. Basically in the worst case (locked_lruvec == NULL) you end up calling
mem_cgroup_page_lruvec and the rcu_read_lock/unlock pair a couple of times
for a single page. It might make more sense to structure this like:
if (locked_lruvec) {
    if (lruvec_holds_page_lru_lock(page, locked_lruvec))
        return locked_lruvec;

    unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
}
return lock_page_lruvec_irqsave(page, flags);

The other piece that has me scratching my head is that I wonder if we
couldn't do this without needing the rcu_read_lock. For example, what
if we were to compare the page mem_cgroup pointer to the memcg back
pointer stored in the mem_cgroup_per_node? It seems like ordering
things this way would significantly reduce the overhead due to the
pointer chasing to see if the page is in the locked lruvec or not.
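
For illustration, a minimal sketch of the restructuring being suggested here,
where lruvec_holds_page_lru_lock() is a hypothetical predicate (not part of
the posted series) that reports whether the held lruvec already covers the
page:

static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		/* reuse the lock we already hold when it covers this page */
		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	/* the worst case is now a single lock acquisition */
	return lock_page_lruvec_irqsave(page, flags);
}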

>  #ifdef CONFIG_CGROUP_WRITEBACK
>
>  struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
> diff --git a/mm/mlock.c b/mm/mlock.c
> index 5d40d259a931..bc2fb3bfbe7a 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -303,17 +303,10 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
>         /* Phase 1: page isolation */
>         for (i = 0; i < nr; i++) {
>                 struct page *page = pvec->pages[i];
> -               struct lruvec *new_lruvec;
>
>                 /* block memcg change in mem_cgroup_move_account */
>                 lock_page_memcg(page);
> -               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
> -               if (new_lruvec != lruvec) {
> -                       if (lruvec)
> -                               unlock_page_lruvec_irq(lruvec);
> -                       lruvec = lock_page_lruvec_irq(page);
> -               }
> -
> +               lruvec = relock_page_lruvec_irq(page, lruvec);
>                 if (TestClearPageMlocked(page)) {
>                         /*
>                          * We already have pin from follow_page_mask()
> diff --git a/mm/swap.c b/mm/swap.c
> index 09edac441eb6..6d9c7288f7de 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -209,19 +209,12 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
>
>         for (i = 0; i < pagevec_count(pvec); i++) {
>                 struct page *page = pvec->pages[i];
> -               struct lruvec *new_lruvec;
>
>                 /* block memcg migration during page moving between lru */
>                 if (!TestClearPageLRU(page))
>                         continue;
>
> -               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
> -               if (lruvec != new_lruvec) {
> -                       if (lruvec)
> -                               unlock_page_lruvec_irqrestore(lruvec, flags);
> -                       lruvec = lock_page_lruvec_irqsave(page, &flags);
> -               }
> -
> +               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
>                 (*move_fn)(page, lruvec);
>
>                 SetPageLRU(page);
> @@ -864,17 +857,12 @@ void release_pages(struct page **pages, int nr)
>                 }
>
>                 if (PageLRU(page)) {
> -                       struct lruvec *new_lruvec;
> -
> -                       new_lruvec = mem_cgroup_page_lruvec(page,
> -                                                       page_pgdat(page));
> -                       if (new_lruvec != lruvec) {
> -                               if (lruvec)
> -                                       unlock_page_lruvec_irqrestore(lruvec,
> -                                                                       flags);
> +                       struct lruvec *prev_lruvec = lruvec;
> +
> +                       lruvec = relock_page_lruvec_irqsave(page, lruvec,
> +                                                                       &flags);
> +                       if (prev_lruvec != lruvec)
>                                 lock_batch = 0;
> -                               lruvec = lock_page_lruvec_irqsave(page, &flags);
> -                       }
>
>                         __ClearPageLRU(page);
>                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
> @@ -980,15 +968,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
>
>         for (i = 0; i < pagevec_count(pvec); i++) {
>                 struct page *page = pvec->pages[i];
> -               struct lruvec *new_lruvec;
> -
> -               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
> -               if (lruvec != new_lruvec) {
> -                       if (lruvec)
> -                               unlock_page_lruvec_irqrestore(lruvec, flags);
> -                       lruvec = lock_page_lruvec_irqsave(page, &flags);
> -               }
>
> +               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
>                 __pagevec_lru_add_fn(page, lruvec);
>         }
>         if (lruvec)
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 168c1659e430..bdb53a678e7e 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4292,15 +4292,9 @@ void check_move_unevictable_pages(struct pagevec *pvec)
>
>         for (i = 0; i < pvec->nr; i++) {
>                 struct page *page = pvec->pages[i];
> -               struct lruvec *new_lruvec;
>
>                 pgscanned++;
> -               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
> -               if (lruvec != new_lruvec) {
> -                       if (lruvec)
> -                               unlock_page_lruvec_irq(lruvec);
> -                       lruvec = lock_page_lruvec_irq(page);
> -               }
> +               lruvec = relock_page_lruvec_irq(page, lruvec);
>
>                 if (!PageLRU(page) || !PageUnevictable(page))
>                         continue;
> --
> 1.8.3.1
>
Alex Shi July 30, 2020, 6:08 a.m. UTC | #2
On 2020/7/30 1:52 AM, Alexander Duyck wrote:
>> +       rcu_read_lock();
>> +       locked = mem_cgroup_page_lruvec(page, pgdat) == locked_lruvec;
>> +       rcu_read_unlock();
>> +
>> +       if (locked)
>> +               return locked_lruvec;
>> +
>> +       if (locked_lruvec)
>> +               unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
>> +
>> +       return lock_page_lruvec_irqsave(page, flags);
>> +}
>> +
> So looking these over they seem to be pretty inefficient for what they
> do. Basically in the worst case (locked_lruvec == NULL) you end up calling
> mem_cgroup_page_lruvec and the rcu_read_lock/unlock pair a couple of times
> for a single page. It might make more sense to structure this like:
> if (locked_lruvec) {

Uh, we still need to check this page's lruvec, and that needs an rcu_read_lock.
To save a mem_cgroup_page_lruvec call, we would have to open-code lock_page_lruvec,
as you mentioned before.

>     if (lruvec_holds_page_lru_lock(page, locked_lruvec))
>         return locked_lruvec;
> 
>     unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
> }
> return lock_page_lruvec_irqsave(page, flags);
> 
> The other piece that has me scratching my head is that I wonder if we
> couldn't do this without needing the rcu_read_lock. For example, what
> if we were to compare the page mem_cgroup pointer to the memcg back
> pointer stored in the mem_cgroup_per_node? It seems like ordering
> things this way would significantly reduce the overhead due to the
> pointer chasing to see if the page is in the locked lruvec or not.
> 

If page->mem_cgroup is always charged, the following could be better.

+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+               struct lruvec *locked_lruvec, unsigned long *flags)
+{
+       struct lruvec *lruvec;
+
+       if (mem_cgroup_disabled())
+               return locked_lruvec;
+
+       /* user pages are always charged */
+       VM_BUG_ON_PAGE(!page->mem_cgroup, page);
+
+       rcu_read_lock();
+       if (likely(lruvec_memcg(locked_lruvec) == page->mem_cgroup)) {
+               rcu_read_unlock();
+               return locked_lruvec;
+       }
+
+       if (locked_lruvec)
+               unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+
+       lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+       spin_lock_irqsave(&lruvec->lru_lock, *flags);
+       rcu_read_unlock();
+       lruvec_memcg_debug(lruvec, page);
+
+       return lruvec;
+}
+

User pages are always charged now that readahead pages are charged too,
and it looks like we can also apply the patch below. I will test it to see
if there are other exceptions.


commit 826128346e50f6c60c513e166998466b593becad
Author: Alex Shi <alex.shi@linux.alibaba.com>
Date:   Thu Jul 30 13:58:38 2020 +0800

    mm/memcg: remove useless check on page->mem_cgroup

    Since readahead pages are now charged to a memcg too, we don't need to
    check for this exception anymore.

    Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index af96217f2ec5..0c7f6bed199b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1336,12 +1336,6 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd

        VM_BUG_ON_PAGE(PageTail(page), page);
        memcg = READ_ONCE(page->mem_cgroup);
-       /*
-        * Swapcache readahead pages are added to the LRU - and
-        * possibly migrated - before they are charged.
-        */
-       if (!memcg)
-               memcg = root_mem_cgroup;

        mz = mem_cgroup_page_nodeinfo(memcg, page);
        lruvec = &mz->lruvec;
@@ -6962,10 +6956,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
        if (newpage->mem_cgroup)
                return;

-       /* Swapcache readahead pages can get replaced before being charged */
        memcg = oldpage->mem_cgroup;
-       if (!memcg)
-               return;

        /* Force-charge the new page. The old one will be freed soon */
        nr_pages = thp_nr_pages(newpage);
@@ -7160,10 +7151,6 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)

        memcg = page->mem_cgroup;

-       /* Readahead page, never charged */
-       if (!memcg)
-               return;
-
        /*
         * In case the memcg owning these pages has been offlined and doesn't
         * have an ID allocated to it anymore, charge the closest online
Alexander Duyck July 31, 2020, 2:20 p.m. UTC | #3
On Wed, Jul 29, 2020 at 11:08 PM Alex Shi <alex.shi@linux.alibaba.com> wrote:
>
>
>
> On 2020/7/30 1:52 AM, Alexander Duyck wrote:
> >> +       rcu_read_lock();
> >> +       locked = mem_cgroup_page_lruvec(page, pgdat) == locked_lruvec;
> >> +       rcu_read_unlock();
> >> +
> >> +       if (locked)
> >> +               return locked_lruvec;
> >> +
> >> +       if (locked_lruvec)
> >> +               unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
> >> +
> >> +       return lock_page_lruvec_irqsave(page, flags);
> >> +}
> >> +
> > So looking these over they seem to be pretty inefficient for what they
> > do. Basically in the worst case (locked_lruvec == NULL) you end up calling
> > mem_cgroup_page_lruvec and the rcu_read_lock/unlock pair a couple of times
> > for a single page. It might make more sense to structure this like:
> > if (locked_lruvec) {
>
> Uh, we still need to check this page's lruvec, and that needs an rcu_read_lock.
> To save a mem_cgroup_page_lruvec call, we would have to open-code lock_page_lruvec,
> as you mentioned before.
>
> >     if (lruvec_holds_page_lru_lock(page, locked_lruvec))
> >         return locked_lruvec;
> >
> >     unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
> > }
> > return lock_page_lruvec_irqsave(page, flags);
> >
> > The other piece that has me scratching my head is that I wonder if we
> > couldn't do this without needing the rcu_read_lock. For example, what
> > if we were to compare the page mem_cgroup pointer to the memcg back
> > pointer stored in the mem_cgroup_per_node? It seems like ordering
> > things this way would significantly reduce the overhead due to the
> > pointer chasing to see if the page is in the locked lruvec or not.
> >
>
> If page->mem_cgroup is always charged, the following could be better.
>
> +/* Don't lock again iff page's lruvec locked */
> +static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
> +               struct lruvec *locked_lruvec, unsigned long *flags)
> +{
> +       struct lruvec *lruvec;
> +
> +       if (mem_cgroup_disabled())
> +               return locked_lruvec;
> +
> +       /* user pages are always charged */
> +       VM_BUG_ON_PAGE(!page->mem_cgroup, page);
> +
> +       rcu_read_lock();
> +       if (likely(lruvec_memcg(locked_lruvec) == page->mem_cgroup)) {
> +               rcu_read_unlock();
> +               return locked_lruvec;
> +       }
> +
> +       if (locked_lruvec)
> +               unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
> +
> +       lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
> +       spin_lock_irqsave(&lruvec->lru_lock, *flags);
> +       rcu_read_unlock();
> +       lruvec_memcg_debug(lruvec, page);
> +
> +       return lruvec;
> +}
> +

I understand that you have to use the rcu_lock when you want to
acquire the lruvec via mem_cgroup_page_lruvec(). That is why I didn't
do away with the call to lock_page_lruvec_irqsave() at the end of the
function. However it doesn't make sense to do it when you are already
holding the locked_lruvec and simply getting the container of it in
order to compare pointer values.

One thing I was getting at with the lruvec_holds_page_lru_lock()
function I had introduced in my example is that the code paths for the
two relock functions are very similar. If we could move all the logic
for identifying if we can reuse the lock into a single function it
would cut down on the redundancy quite a bit as well. In addition, by
testing for locked_lruvec != NULL before we do the comparison we can
save ourselves some unnecessary testing in the case where it is NULL.

The thought I had was to try to avoid the rcu_lock entirely in the lock
reuse case. Basically you just need to compare the pgdat value and the
memcg between the page and the lruvec. As long as they both point to the
same values then you should have the correct lruvec and there is no need
to relock. There is no need to take the rcu_lock as long as you aren't
dereferencing anything; if you are just comparing the pointers it should
be fine. The fallback if mem_cgroup_disabled() is to make certain the
page's pgdat->__lruvec is the address belonging to the lruvec.
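
A sketch of what such a predicate could look like, assuming the memcg back
pointer in mem_cgroup_per_node mentioned above and a pgdat back pointer in
the lruvec (a hypothetical helper, not code from the posted series):

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);
	const struct mem_cgroup *memcg;
	struct mem_cgroup_per_node *mz;

	/* with memcg disabled there is a single lruvec per node */
	if (mem_cgroup_disabled())
		return lruvec == &pgdat->__lruvec;

	/*
	 * The caller already holds this lruvec's lru_lock, so the lruvec
	 * cannot go away under us; page->mem_cgroup is only compared
	 * against the back pointer, never dereferenced, hence no
	 * rcu_read_lock is needed.
	 */
	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = page->mem_cgroup ? : root_mem_cgroup;

	return lruvec->pgdat == pgdat && mz->memcg == memcg;
}

The ternary falling back to root_mem_cgroup covers pages that have not been
charged yet.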

> User pages are always charged now that readahead pages are charged too,
> and it looks like we can also apply the patch below. I will test it to see
> if there are other exceptions.

Yes, that would simplify things a bit, as the code I had was having to
use a ternary to fall back to root_mem_cgroup if page->mem_cgroup was
NULL. I should be able to finish up testing today and will submit a
few clean-up patches as RFC to get your thoughts/feedback.

> commit 826128346e50f6c60c513e166998466b593becad
> Author: Alex Shi <alex.shi@linux.alibaba.com>
> Date:   Thu Jul 30 13:58:38 2020 +0800
>
>     mm/memcg: remove useless check on page->mem_cgroup
>
>     Since readahead pages are now charged to a memcg too, we don't need to
>     check for this exception anymore.
>
>     Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index af96217f2ec5..0c7f6bed199b 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1336,12 +1336,6 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd
>
>         VM_BUG_ON_PAGE(PageTail(page), page);
>         memcg = READ_ONCE(page->mem_cgroup);
> -       /*
> -        * Swapcache readahead pages are added to the LRU - and
> -        * possibly migrated - before they are charged.
> -        */
> -       if (!memcg)
> -               memcg = root_mem_cgroup;
>
>         mz = mem_cgroup_page_nodeinfo(memcg, page);
>         lruvec = &mz->lruvec;
> @@ -6962,10 +6956,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
>         if (newpage->mem_cgroup)
>                 return;
>
> -       /* Swapcache readahead pages can get replaced before being charged */
>         memcg = oldpage->mem_cgroup;
> -       if (!memcg)
> -               return;
>
>         /* Force-charge the new page. The old one will be freed soon */
>         nr_pages = thp_nr_pages(newpage);
> @@ -7160,10 +7151,6 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
>
>         memcg = page->mem_cgroup;
>
> -       /* Readahead page, never charged */
> -       if (!memcg)
> -               return;
> -
>         /*
>          * In case the memcg owning these pages has been offlined and doesn't
>          * have an ID allocated to it anymore, charge the closest online
>

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 258901021c6c..6e670f991b42 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1313,6 +1313,46 @@  static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+		struct lruvec *locked_lruvec)
+{
+	struct pglist_data *pgdat = page_pgdat(page);
+	bool locked;
+
+	rcu_read_lock();
+	locked = mem_cgroup_page_lruvec(page, pgdat) == locked_lruvec;
+	rcu_read_unlock();
+
+	if (locked)
+		return locked_lruvec;
+
+	if (locked_lruvec)
+		unlock_page_lruvec_irq(locked_lruvec);
+
+	return lock_page_lruvec_irq(page);
+}
+
+/* Don't lock again iff page's lruvec locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+		struct lruvec *locked_lruvec, unsigned long *flags)
+{
+	struct pglist_data *pgdat = page_pgdat(page);
+	bool locked;
+
+	rcu_read_lock();
+	locked = mem_cgroup_page_lruvec(page, pgdat) == locked_lruvec;
+	rcu_read_unlock();
+
+	if (locked)
+		return locked_lruvec;
+
+	if (locked_lruvec)
+		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+
+	return lock_page_lruvec_irqsave(page, flags);
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
diff --git a/mm/mlock.c b/mm/mlock.c
index 5d40d259a931..bc2fb3bfbe7a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -303,17 +303,10 @@  static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 	/* Phase 1: page isolation */
 	for (i = 0; i < nr; i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec;
 
 		/* block memcg change in mem_cgroup_move_account */
 		lock_page_memcg(page);
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (new_lruvec != lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irq(lruvec);
-			lruvec = lock_page_lruvec_irq(page);
-		}
-
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 		if (TestClearPageMlocked(page)) {
 			/*
 			 * We already have pin from follow_page_mask()
diff --git a/mm/swap.c b/mm/swap.c
index 09edac441eb6..6d9c7288f7de 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -209,19 +209,12 @@  static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec;
 
 		/* block memcg migration during page moving between lru */
 		if (!TestClearPageLRU(page))
 			continue;
 
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irqrestore(lruvec, flags);
-			lruvec = lock_page_lruvec_irqsave(page, &flags);
-		}
-
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		(*move_fn)(page, lruvec);
 
 		SetPageLRU(page);
@@ -864,17 +857,12 @@  void release_pages(struct page **pages, int nr)
 		}
 
 		if (PageLRU(page)) {
-			struct lruvec *new_lruvec;
-
-			new_lruvec = mem_cgroup_page_lruvec(page,
-							page_pgdat(page));
-			if (new_lruvec != lruvec) {
-				if (lruvec)
-					unlock_page_lruvec_irqrestore(lruvec,
-									flags);
+			struct lruvec *prev_lruvec = lruvec;
+
+			lruvec = relock_page_lruvec_irqsave(page, lruvec,
+									&flags);
+			if (prev_lruvec != lruvec)
 				lock_batch = 0;
-				lruvec = lock_page_lruvec_irqsave(page, &flags);
-			}
 
 			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
@@ -980,15 +968,8 @@  void __pagevec_lru_add(struct pagevec *pvec)
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec;
-
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irqrestore(lruvec, flags);
-			lruvec = lock_page_lruvec_irqsave(page, &flags);
-		}
 
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		__pagevec_lru_add_fn(page, lruvec);
 	}
 	if (lruvec)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 168c1659e430..bdb53a678e7e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4292,15 +4292,9 @@  void check_move_unevictable_pages(struct pagevec *pvec)
 
 	for (i = 0; i < pvec->nr; i++) {
 		struct page *page = pvec->pages[i];
-		struct lruvec *new_lruvec;
 
 		pgscanned++;
-		new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-		if (lruvec != new_lruvec) {
-			if (lruvec)
-				unlock_page_lruvec_irq(lruvec);
-			lruvec = lock_page_lruvec_irq(page);
-		}
+		lruvec = relock_page_lruvec_irq(page, lruvec);
 
 		if (!PageLRU(page) || !PageUnevictable(page))
 			continue;