@@ -67,8 +67,8 @@ In kernel use of migrate_pages()
Lists of pages to be migrated are generated by scanning over
pages and moving them into lists. This is done by
- calling isolate_lru_page().
- Calling isolate_lru_page() increases the references to the page
+ calling folio_isolate_lru().
+ Calling folio_isolate_lru() increases the references to the page
so that it cannot vanish while the page migration occurs.
It also prevents the swapper or other scans from encountering
the page.
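
As an aside for readers, a minimal C sketch of the isolation step just described; collect_for_migration() and pagelist are hypothetical names, and the caller is assumed to already hold a reference on the folio, which folio_isolate_lru() requires:

	/*
	 * Hypothetical helper: pull one folio off the LRU onto a private
	 * migration list. On success folio_isolate_lru() takes an extra
	 * reference, so the folio cannot vanish or be re-found by reclaim
	 * while it sits on the list.
	 */
	static void collect_for_migration(struct folio *folio,
					  struct list_head *pagelist)
	{
		if (folio_isolate_lru(folio))
			list_add_tail(&folio->lru, pagelist);
	}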
@@ -86,7 +86,7 @@ How migrate_pages() works
migrate_pages() does several passes over its list of pages. A page is moved
if all references to the page are removable at the time. The page has
-already been removed from the LRU via isolate_lru_page() and the refcount
+already been removed from the LRU via folio_isolate_lru() and the refcount
is increased so that the page cannot be freed while page migration occurs.
Steps:
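
For context around this hunk (the numbered steps follow in the original document), here is a hedged sketch of the caller side, loosely following the pattern used in mm/mempolicy.c; mtc stands in for a caller-chosen struct migration_target_control and is an assumption of this sketch:

	LIST_HEAD(pagelist);
	unsigned int nr_succeeded;
	int err;

	/* ... fill pagelist via folio_isolate_lru(), as sketched above ... */

	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_SYSCALL, &nr_succeeded);
	if (err)
		/* return any unmigrated folios to the LRU */
		putback_movable_pages(&pagelist);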
@@ -230,7 +230,7 @@ In Nick's patch, he used one of the struct page LRU list link fields as a count
of VM_LOCKED VMAs that map the page (Rik van Riel had the same idea three years
earlier). But this use of the link field for a count prevented the management
of the pages on an LRU list, and thus mlocked pages were not migratable as
-isolate_lru_page() could not detect them, and the LRU list link field was not
+folio_isolate_lru() could not detect them, and the LRU list link field was not
available to the migration subsystem.
Nick resolved this by putting mlocked pages back on the LRU list before
@@ -50,8 +50,8 @@ mbind() sets a new memory policy. A process's pages can also be moved via sys_
1. Remove pages from the LRU.
-   Lists of pages to be migrated are generated by scanning over pages and moving them into lists. This is done by calling isolate_lru_page().
-   Calling isolate_lru_page() increases the references to the page so that it cannot vanish while page migration occurs.
+   Lists of pages to be migrated are generated by scanning over pages and moving them into lists. This is done by calling folio_isolate_lru().
+   Calling folio_isolate_lru() increases the references to the page so that it cannot vanish while page migration occurs.
    It also prevents the swapper or other scans from encountering the page.
@@ -65,7 +65,7 @@ How migrate_pages() works
=======================
migrate_pages() does several passes over its list of pages. A page is moved if all references to it are removable at the time.
-The page has already been removed from the LRU via isolate_lru_page() and the refcount
+The page has already been removed from the LRU via folio_isolate_lru() and the refcount
is increased so that the page is not freed while page migration occurs.
Steps:
@@ -113,7 +113,7 @@
* ->private_lock (try_to_unmap_one)
* ->i_pages lock (try_to_unmap_one)
* ->lruvec->lru_lock (follow_page->mark_page_accessed)
- * ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
+ * ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
* ->private_lock (folio_remove_rmap_pte->set_page_dirty)
* ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
* bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
@@ -99,13 +99,6 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
-bool isolate_lru_page(struct page *page)
-{
- if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
- return false;
- return folio_isolate_lru((struct folio *)page);
-}
-
void putback_lru_page(struct page *page)
{
folio_putback_lru(page_folio(page));
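
With the wrapper gone, any remaining page-based caller open-codes the conversion. A hedged sketch of what such a call site looks like after this change (error handling is illustrative):

	/*
	 * Former isolate_lru_page(page) call site: convert to the folio
	 * first, then isolate that. page_folio() copes with tail pages,
	 * replacing the wrapper's PageTail() warning and bare cast.
	 */
	struct folio *folio = page_folio(page);

	if (!folio_isolate_lru(folio))
		return -EBUSY;
	list_add_tail(&folio->lru, &pagelist);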
@@ -383,7 +383,6 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
/*
* in mm/vmscan.c:
*/
-bool isolate_lru_page(struct page *page);
bool folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
@@ -607,7 +607,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
}
/*
- * We can do it before isolate_lru_page because the
+ * We can do it before folio_isolate_lru because the
* page can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
@@ -1847,7 +1847,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
result = SCAN_FAIL;
goto xa_unlocked;
}
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
} else if (folio_trylock(folio)) {
folio_get(folio);
@@ -1862,7 +1862,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
end - index);
- /* drain lru cache to help isolate_lru_page() */
+ /* drain lru cache to help folio_isolate_lru() */
lru_add_drain();
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
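
Why the drain helps, as a short hedged sketch: a freshly added folio can still sit in a per-CPU LRU batch where its LRU flag is not yet set, so folio_isolate_lru() would fail on it; lru_add_drain() flushes the local batch onto the real LRU lists first (out_unlock is a hypothetical label):

	lru_add_drain();		/* flush this CPU's LRU batches */
	if (!folio_isolate_lru(folio))	/* fails unless the folio is on an LRU */
		goto out_unlock;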
@@ -328,7 +328,7 @@ static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
/*
* One extra ref because caller holds an extra reference, either from
- * isolate_lru_page() for a regular page, or migrate_vma_collect() for
+ * folio_isolate_lru() for a regular page, or migrate_vma_collect() for
* a device page.
*/
int extra = 1 + (page == fault_page);
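
A simplified, hedged sketch of the check this sets up; the real function also accounts for page-cache and device-private references before comparing:

	/*
	 * Migration is only safe once the references counted in 'extra'
	 * (the isolation reference, plus one if this is the fault page)
	 * are the only ones left. Simplified shape of the check:
	 */
	if (page_ref_count(page) != extra)
		return false;	/* someone else still holds a reference */
	return true;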
@@ -918,7 +918,7 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
/*
* lru_cache_disable() needs to be called before we start compiling
- * a list of pages to be migrated using isolate_lru_page().
+ * a list of pages to be migrated using folio_isolate_lru().
 * It drains pages on the LRU cache and then disables it on all cpus until
* lru_cache_enable is called.
*
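
The intended bracket, as a hedged usage sketch (the commented ellipsis stands for whatever collection loop the caller runs, e.g. the migrate_pages(2) or alloc_contig_range() paths):

	lru_cache_disable();	/* drain, then stop per-CPU LRU batching */

	/*
	 * ... walk the target pages, folio_isolate_lru() each onto a
	 * list, then hand that list to migrate_pages() ...
	 */

	lru_cache_enable();	/* resume per-CPU LRU batching */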
There are no more callers of isolate_lru_page(), remove it.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 Documentation/mm/page_migration.rst                    | 6 +++---
 Documentation/mm/unevictable-lru.rst                   | 2 +-
 Documentation/translations/zh_CN/mm/page_migration.rst | 6 +++---
 mm/filemap.c                                           | 2 +-
 mm/folio-compat.c                                      | 7 -------
 mm/internal.h                                          | 1 -
 mm/khugepaged.c                                        | 6 +++---
 mm/migrate_device.c                                    | 2 +-
 mm/swap.c                                              | 2 +-
 9 files changed, 13 insertions(+), 21 deletions(-)