diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -84,6 +84,8 @@ static __always_inline void add_page_to_lru_list(struct page *page,
{
enum lru_list lru = page_lru(page);

+ VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
@@ -93,6 +95,8 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
{
enum lru_list lru = page_lru(page);

+ VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
@@ -100,6 +104,8 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec)
{
+ VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+
list_del(&page->lru);
update_lru_size(lruvec, page_lru(page), page_zonenum(page),
-thp_nr_pages(page));
diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1656,6 +1656,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);

+ VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+
nr_pages = compound_nr(page);
total_scan += nr_pages;

@@ -1866,7 +1868,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
add_page_to_lru_list(page, lruvec);
nr_pages = thp_nr_pages(page);
nr_moved += nr_pages;
We need to make sure that the page is deleted from or added to the
correct lruvec list. So add a VM_BUG_ON_PAGE() to the LRU list helpers
to catch invalid users. The now-redundant open-coded check in
move_pages_to_lru() is dropped, since add_page_to_lru_list() performs
the same assertion.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 include/linux/mm_inline.h | 6 ++++++
 mm/vmscan.c               | 3 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)
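A note on what the assertion actually verifies: each page maps to
exactly one lruvec, determined by the page's node and the memcg the
page is charged to. The sketch below is a simplified view of the
lruvec_holds_page_lru_lock() logic, not the exact in-tree helper; it
assumes CONFIG_MEMCG and ignores the mem_cgroup_disabled() fast path.

	/*
	 * Simplified sketch: a caller manipulating a page's LRU state
	 * must hold the lru_lock of the one lruvec the page belongs to,
	 * i.e. the lruvec of (page's node, page's memcg). Any other
	 * lruvec means the caller locked the wrong lock.
	 */
	static inline bool lruvec_holds_page_lru_lock(struct page *page,
						      struct lruvec *lruvec)
	{
		/* Must be the lruvec of the node the page resides on... */
		if (lruvec_pgdat(lruvec) != page_pgdat(page))
			return false;

		/* ...and of the memcg the page is charged to. */
		return lruvec_memcg(lruvec) == page_memcg(page);
	}

With the check centralized in add_page_to_lru_list() and
del_page_from_lru_list(), every caller that moves a page on or off an
LRU list is covered, which is why the last hunk deletes the one
open-coded copy in move_pages_to_lru() rather than keeping two.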