@@ -197,13 +197,15 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 		if (isolation && !TestClearPageLRU(page))
 			continue;
-		lruvec = lock_page_lruvec_irqsave(page, &flags);
+		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		(*move_fn)(page, lruvec, arg);
-		unlock_page_lruvec_irqrestore(lruvec, flags);
 		if (isolation)
 			SetPageLRU(page);
 	}
+	if (lruvec)
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+
 	release_pages(pvec->pages, pvec->nr);
 	pagevec_reinit(pvec);
 }
@@ -821,14 +823,11 @@ void release_pages(struct page **pages, int nr)
 		}
 		if (TestClearPageLRU(page)) {
-			struct lruvec *new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+			struct lruvec *pre_lruvec = lruvec;
-			if (new_lruvec != lruvec) {
-				if (lruvec)
-					unlock_page_lruvec_irqrestore(lruvec, flags);
+			lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+			if (pre_lruvec != lruvec)
 				lock_batch = 0;
-				lruvec = lock_page_lruvec_irqsave(page, &flags);
-			}
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
Since we introduced relock_page_lruvec, we can use it in more places to
reduce lock/unlock churn: when consecutive pages belong to the same
lruvec, the lock is now held across iterations instead of being dropped
and retaken for every page.

Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Hugh Dickins <hughd@google.com>
Cc: linux-kernel@vger.kernel.org
Cc: cgroups@vger.kernel.org
Cc: linux-mm@kvack.org
---
 mm/swap.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
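
For reviewers without the earlier patches of the series at hand, here is
a minimal sketch of the relock helper this change depends on. The body
below is an assumption reconstructed from the comparison this patch
deletes from release_pages(), not a quote of the series; the real helper
may differ in detail (e.g. RCU protection around the lookup):

/*
 * Minimal sketch of relock_page_lruvec_irqsave() (introduced earlier in
 * this series; not part of this patch). The ownership test mirrors the
 * open-coded check this patch removes from release_pages(): compare the
 * page's lruvec with the one whose lock is currently held.
 */
static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		/* Same lruvec: keep the lock, skip an unlock/lock pair. */
		if (mem_cgroup_page_lruvec(page, page_pgdat(page)) ==
							locked_lruvec)
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	/* Different (or no) lruvec held: take the right lock. */
	return lock_page_lruvec_irqsave(page, flags);
}

Note also that the lock_batch handling in release_pages() is preserved:
the counter is reset only when relock actually switched locks, so the
existing cap on how long one lock is held in a single stretch still
applies unchanged.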