Message ID | 1596254957-22560-1-git-send-email-alex.shi@linux.alibaba.com (mailing list archive)
State | New, archived
Series | [1/4] mm/thp: move lru_add_page_tail func to huge_memory.c
Hi Andrew,

This patchset is just cleanup and has been reviewed by Kirill. Could it still make 5.9?

Thanks
Alex

On 2020/8/1 12:09 PM, Alex Shi wrote:
> The function is only used in huge_memory.c; defining it in another file
> under a CONFIG_TRANSPARENT_HUGEPAGE guard just looks weird.
>
> Let's move it to THP code and make it static, as Hugh Dickins suggested.
>
> Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
> Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: linux-kernel@vger.kernel.org
> Cc: linux-mm@kvack.org
> ---
>  include/linux/swap.h |  2 --
>  mm/huge_memory.c     | 30 ++++++++++++++++++++++++++++++
>  mm/swap.c            | 33 ---------------------------------
>  3 files changed, 30 insertions(+), 35 deletions(-)
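The change described above is the classic refactor of giving a single-caller helper internal linkage instead of exporting it through a header behind a config guard. Below is a minimal, self-contained userspace sketch of that idea; the add_tail_pages() name and the file layout implied by the comments are made up for illustration and are not kernel code.

/*
 * Before the patch, the helper lived in one file (swap.c) while its only
 * caller was in another (huge_memory.c), so it needed an extern
 * declaration in a shared header under #ifdef CONFIG_TRANSPARENT_HUGEPAGE.
 * After the patch, the helper sits next to its single caller and is
 * static, so the header declaration and the #ifdef both disappear.
 */
#include <stdio.h>

/*
 * Internal linkage: the symbol is invisible outside this translation
 * unit, the compiler is free to inline it, and no header has to change
 * when its signature does.
 */
static void add_tail_pages(int nr_tails)
{
        for (int i = 1; i <= nr_tails; i++)
                printf("splitting out tail page %d\n", i);
}

int main(void)
{
        /* the only caller, analogous to the split path in huge_memory.c */
        add_tail_pages(3);
        return 0;
}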
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 661046994db4..43e6b3458f58 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -338,8 +338,6 @@ extern void lru_note_cost(struct lruvec *lruvec, bool file,
                                 unsigned int nr_pages);
 extern void lru_note_cost_page(struct page *);
 extern void lru_cache_add(struct page *);
-extern void lru_add_page_tail(struct page *page, struct page *page_tail,
-                              struct lruvec *lruvec, struct list_head *head);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 90733cefa528..bc905e7079bf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2315,6 +2315,36 @@ static void remap_page(struct page *page)
         }
 }
 
+static void lru_add_page_tail(struct page *page, struct page *page_tail,
+                              struct lruvec *lruvec, struct list_head *list)
+{
+        VM_BUG_ON_PAGE(!PageHead(page), page);
+        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
+        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
+        lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
+
+        if (!list)
+                SetPageLRU(page_tail);
+
+        if (likely(PageLRU(page)))
+                list_add_tail(&page_tail->lru, &page->lru);
+        else if (list) {
+                /* page reclaim is reclaiming a huge page */
+                get_page(page_tail);
+                list_add_tail(&page_tail->lru, list);
+        } else {
+                /*
+                 * Head page has not yet been counted, as an hpage,
+                 * so we must account for each subpage individually.
+                 *
+                 * Put page_tail on the list at the correct position
+                 * so they all end up in order.
+                 */
+                add_page_to_lru_list_tail(page_tail, lruvec,
+                                          page_lru(page_tail));
+        }
+}
+
 static void __split_huge_page_tail(struct page *head, int tail,
                 struct lruvec *lruvec, struct list_head *list)
 {
diff --git a/mm/swap.c b/mm/swap.c
index d16d65d9b4e0..c674fb441fe9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -935,39 +935,6 @@ void __pagevec_release(struct pagevec *pvec)
 }
 EXPORT_SYMBOL(__pagevec_release);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct page *page, struct page *page_tail,
-                       struct lruvec *lruvec, struct list_head *list)
-{
-        VM_BUG_ON_PAGE(!PageHead(page), page);
-        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
-        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
-        lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
-
-        if (!list)
-                SetPageLRU(page_tail);
-
-        if (likely(PageLRU(page)))
-                list_add_tail(&page_tail->lru, &page->lru);
-        else if (list) {
-                /* page reclaim is reclaiming a huge page */
-                get_page(page_tail);
-                list_add_tail(&page_tail->lru, list);
-        } else {
-                /*
-                 * Head page has not yet been counted, as an hpage,
-                 * so we must account for each subpage individually.
-                 *
-                 * Put page_tail on the list at the correct position
-                 * so they all end up in order.
-                 */
-                add_page_to_lru_list_tail(page_tail, lruvec,
-                                          page_lru(page_tail));
-        }
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                                  void *arg)
 {
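For readers unfamiliar with the kernel's list primitives: list_add_tail(new, head) links the new node in just before head, i.e. at the tail of the circular list anchored by head, which is what lru_add_page_tail() relies on when it queues tail pages with list_add_tail() or add_page_to_lru_list_tail(). Below is a small, self-contained userspace re-implementation demonstrating that ordering; struct demo_page and the function bodies are simplified stand-ins, not the kernel's <linux/list.h>.

#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}

/* insert 'new' just before 'head': the tail position of the list */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

struct demo_page {
        int index;
        struct list_head lru;
};

int main(void)
{
        struct list_head lru_list;
        struct demo_page pages[4];
        struct list_head *pos;

        INIT_LIST_HEAD(&lru_list);
        for (int i = 0; i < 4; i++) {
                pages[i].index = i;
                /* each page lands at the tail, so traversal order is 0,1,2,3 */
                list_add_tail(&pages[i].lru, &lru_list);
        }

        for (pos = lru_list.next; pos != &lru_list; pos = pos->next) {
                struct demo_page *p = (struct demo_page *)
                        ((char *)pos - offsetof(struct demo_page, lru));
                printf("page %d\n", p->index);
        }
        return 0;
}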