Message ID | 20240817084941.2375713-5-wangkefeng.wang@huawei.com (mailing list archive)
---|---
State | New
Series | mm: memory_hotplug: improve do_migrate_range()
On 2024/8/17 16:49, Kefeng Wang wrote:
> Add an isolate_folio_to_list() helper to try to isolate HugeTLB,
> no-LRU movable and LRU folios to a list, which will be reused by
> do_migrate_range() from memory hotplug soon. Also drop
> mf_isolate_folio(), since we can use the new helper directly in
> soft_offline_in_use_page().
>
> Acked-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Thanks for your patch.

> [...]
> @@ -2744,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
>  		return 0;
>  	}
>  
> -	if (mf_isolate_folio(folio, &pagelist)) {
> +	if (isolate_folio_to_list(folio, &pagelist)) {
>  		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
>  			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
>  		if (!ret) {
> @@ -2766,6 +2732,16 @@ static int soft_offline_in_use_page(struct page *page)
>  			pfn, msg_page[huge], page_count(page), &page->flags);
>  		ret = -EBUSY;
>  	}
> +
> +	/*
> +	 * If we succeed to isolate the folio, we grabbed another refcount on
> +	 * the folio, so we can safely drop the one we got from get_any_page().
> +	 * If we failed to isolate the folio, it means that we cannot go further
> +	 * and we will return an error, so drop the reference we got from
> +	 * get_any_page() as well.
> +	 */
> +	folio_put(folio);

Why is the folio_put() deferred to here? With this change, the folio holds two
extra refcounts when migrate_pages() is called above: one from get_any_page()
and one from folio_isolate_lru(). That means migrate_pages() can never succeed,
and many of my test cases failed because of this change.

Thanks.
On 2024/8/20 17:32, Miaohe Lin wrote:
> On 2024/8/17 16:49, Kefeng Wang wrote:
>> Add an isolate_folio_to_list() helper to try to isolate HugeTLB,
>> no-LRU movable and LRU folios to a list, which will be reused by
>> do_migrate_range() from memory hotplug soon. Also drop
>> mf_isolate_folio(), since we can use the new helper directly in
>> soft_offline_in_use_page().
> [...]
>> +	folio_put(folio);
>
> Why is the folio_put() deferred to here? With this change, the folio holds two
> extra refcounts when migrate_pages() is called above: one from get_any_page()
> and one from folio_isolate_lru(). That means migrate_pages() can never succeed,
> and many of my test cases failed because of this change.

Thanks for your review; I missed this and only tested memory hotplug.
Could you try the following?

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d8298017bd99..64a145a0e29f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2671,6 +2671,7 @@ static int soft_offline_in_use_page(struct page *page)
 	struct folio *folio = page_folio(page);
 	char const *msg_page[] = {"page", "hugepage"};
 	bool huge = folio_test_hugetlb(folio);
+	bool isolated;
 	LIST_HEAD(pagelist);
 	struct migration_target_control mtc = {
 		.nid = NUMA_NO_NODE,
@@ -2710,7 +2711,18 @@ static int soft_offline_in_use_page(struct page *page)
 		return 0;
 	}
 
-	if (isolate_folio_to_list(folio, &pagelist)) {
+	isolated = isolate_folio_to_list(folio, &pagelist);
+
+	/*
+	 * If we succeed to isolate the folio, we grabbed another refcount on
+	 * the folio, so we can safely drop the one we got from get_any_page().
+	 * If we failed to isolate the folio, it means that we cannot go further
+	 * and we will return an error, so drop the reference we got from
+	 * get_any_page() as well.
+	 */
+	folio_put(folio);
+
+	if (isolated) {
 		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
 		if (!ret) {
@@ -2733,15 +2745,6 @@ static int soft_offline_in_use_page(struct page *page)
 		ret = -EBUSY;
 	}
 
-	/*
-	 * If we succeed to isolate the folio, we grabbed another refcount on
-	 * the folio, so we can safely drop the one we got from get_any_page().
-	 * If we failed to isolate the folio, it means that we cannot go further
-	 * and we will return an error, so drop the reference we got from
-	 * get_any_page() as well.
-	 */
-	folio_put(folio);
-
 	return ret;
 }
On 2024/8/20 17:46, Kefeng Wang wrote:
> [...]
> Thanks for your review; I missed this and only tested memory hotplug.
> Could you try the following?
>
> [fixup diff snipped; it moves the folio_put() to just before the
> "if (isolated)" check, as quoted above]

This works for me.

Thanks.
On 2024/8/21 10:00, Miaohe Lin wrote:
> [...]
> This works for me.

Good, my bad for the breakage.

Andrew, please help to squash the above changes, thanks.
On 2024/8/21 10:14, Kefeng Wang wrote:
> [...]
> Good, my bad for the breakage.
>
> Andrew, please help to squash the above changes, thanks.

With the above changes added, this patch looks good to me.

Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Tested-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks.
> diff --git a/mm/migrate.c b/mm/migrate.c
> index dbfa910ec24b..53f8429a8ebe 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
>  	}
>  }
>  
> +/* Must be called with an elevated refcount on the non-hugetlb folio */
> +bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
> +{
> +	bool isolated = false;

No need to initialize this to "false".

> +
> +	if (folio_test_hugetlb(folio)) {
> +		isolated = isolate_hugetlb(folio, list);
> +	} else {
> +		bool lru = !__folio_test_movable(folio);
> +
> +		if (lru)
> +			isolated = folio_isolate_lru(folio);
> +		else
> +			isolated = isolate_movable_page(&folio->page,
> +							ISOLATE_UNEVICTABLE);
> +
> +		if (isolated) {
> +			list_add(&folio->lru, list);
> +			if (lru)
> +				node_stat_add_folio(folio, NR_ISOLATED_ANON +
> +						    folio_is_file_lru(folio));
> +		}
> +	}
> +
> +	return isolated;

Revisiting this patch, we should likely do

	bool isolated, lru;

	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	lru = !__folio_test_movable(folio);
	if (lru)
		...

	if (!isolated)
		return false;

	list_add(&folio->lru, list);
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;

to avoid one indentation level and clean up the code flow a bit.

> +}
> +
>  static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
>  					  struct folio *folio,
>  					  unsigned long idx)
On 2024/8/26 22:50, David Hildenbrand wrote:
> [...]
>> +	return isolated;
>
> Revisiting this patch, we should likely do
>
> [restructured flow with early returns, quoted above]
>
> to avoid one indentation level and clean up the code flow a bit.

Sure, I will rewrite it following the above pattern.
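For reference, a sketch of how the helper might look after David's suggested restructuring; this is assembled from the skeleton and the bodies quoted in this thread, not taken from a posted v2, so treat it as an assumption about the eventual rewrite:

```c
/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	bool isolated, lru;

	/* HugeTLB folios have their own isolation path and list handling. */
	if (folio_test_hugetlb(folio))
		return isolate_hugetlb(folio, list);

	/* Non-LRU movable folios need the movable-page isolation path. */
	lru = !__folio_test_movable(folio);
	if (lru)
		isolated = folio_isolate_lru(folio);
	else
		isolated = isolate_movable_page(&folio->page,
						ISOLATE_UNEVICTABLE);
	if (!isolated)
		return false;

	list_add(&folio->lru, list);
	if (lru)
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	return true;
}
```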
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 644be30b69c8..002e49b2ebd9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
 		unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
 bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 		struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
 	{ return NULL; }
 static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 	{ return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+	{ return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 		struct folio *dst, struct folio *src)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 93848330de1f..d8298017bd99 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2659,40 +2659,6 @@ EXPORT_SYMBOL(unpoison_memory);
 #undef pr_fmt
 #define pr_fmt(fmt) "Soft offline: " fmt
 
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
-	bool isolated = false;
-
-	if (folio_test_hugetlb(folio)) {
-		isolated = isolate_hugetlb(folio, pagelist);
-	} else {
-		bool lru = !__folio_test_movable(folio);
-
-		if (lru)
-			isolated = folio_isolate_lru(folio);
-		else
-			isolated = isolate_movable_page(&folio->page,
-							ISOLATE_UNEVICTABLE);
-
-		if (isolated) {
-			list_add(&folio->lru, pagelist);
-			if (lru)
-				node_stat_add_folio(folio, NR_ISOLATED_ANON +
-						    folio_is_file_lru(folio));
-		}
-	}
-
-	/*
-	 * If we succeed to isolate the folio, we grabbed another refcount on
-	 * the folio, so we can safely drop the one we got from get_any_page().
-	 * If we failed to isolate the folio, it means that we cannot go further
-	 * and we will return an error, so drop the reference we got from
-	 * get_any_page() as well.
-	 */
-	folio_put(folio);
-	return isolated;
-}
-
 /*
  * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
  * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2744,7 +2710,7 @@ static int soft_offline_in_use_page(struct page *page)
 		return 0;
 	}
 
-	if (mf_isolate_folio(folio, &pagelist)) {
+	if (isolate_folio_to_list(folio, &pagelist)) {
 		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
 		if (!ret) {
@@ -2766,6 +2732,16 @@ static int soft_offline_in_use_page(struct page *page)
 			pfn, msg_page[huge], page_count(page), &page->flags);
 		ret = -EBUSY;
 	}
+
+	/*
+	 * If we succeed to isolate the folio, we grabbed another refcount on
+	 * the folio, so we can safely drop the one we got from get_any_page().
+	 * If we failed to isolate the folio, it means that we cannot go further
+	 * and we will return an error, so drop the reference we got from
+	 * get_any_page() as well.
+	 */
+	folio_put(folio);
+
 	return ret;
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index dbfa910ec24b..53f8429a8ebe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,33 @@ void putback_movable_pages(struct list_head *l)
 	}
 }
 
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+	bool isolated = false;
+
+	if (folio_test_hugetlb(folio)) {
+		isolated = isolate_hugetlb(folio, list);
+	} else {
+		bool lru = !__folio_test_movable(folio);
+
+		if (lru)
+			isolated = folio_isolate_lru(folio);
+		else
+			isolated = isolate_movable_page(&folio->page,
+							ISOLATE_UNEVICTABLE);
+
+		if (isolated) {
+			list_add(&folio->lru, list);
+			if (lru)
+				node_stat_add_folio(folio, NR_ISOLATED_ANON +
+						    folio_is_file_lru(folio));
+		}
+	}
+
+	return isolated;
+}
+
 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
 					  struct folio *folio,
 					  unsigned long idx)
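The commit message says the helper will be reused by do_migrate_range() in memory hotplug. As a usage illustration only, here is a hypothetical sketch of such a caller; the real do_migrate_range() in mm/memory_hotplug.c differs in its error handling and pfn walking, so this is an assumption about the shape of that reuse, not the actual hotplug patch:

```c
/* Hypothetical sketch: isolating a pfn range for offline migration. */
static void migrate_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
	LIST_HEAD(source);
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct folio *folio;

		if (!pfn_valid(pfn))
			continue;

		folio = page_folio(pfn_to_page(pfn));

		/* Take a reference so the folio cannot be freed under us. */
		if (!folio_try_get(folio))
			continue;

		/* One call now covers hugetlb, movable and LRU folios. */
		if (!isolate_folio_to_list(folio, &source))
			pr_warn("failed to isolate pfn %lx\n", pfn);

		/* Skip the tail pages of a large folio (simplified). */
		pfn += folio_nr_pages(folio) - 1;

		/* Isolation holds its own reference; drop ours either way. */
		folio_put(folio);
	}

	/* ... then migrate_pages(&source, ...) and putback on failure ... */
}
```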