Message ID | 20240827114728.3212578-3-wangkefeng.wang@huawei.com
---|---
State | New
Series | mm: memory_hotplug: improve do_migrate_range()
On 2024/8/27 19:47, Kefeng Wang wrote:
> Add unmap_poisoned_folio() helper which will be reused by
> do_migrate_range() from memory hotplug soon.
>
> Acked-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

LGTM. One nit below.

> ---
>  mm/internal.h       |  8 ++++++++
>  mm/memory-failure.c | 43 ++++++++++++++++++++++++++-----------------
>  2 files changed, 34 insertions(+), 17 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 5e6f2abcea28..b00ea4595d18 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1048,6 +1048,8 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
>  /*
>   * mm/memory-failure.c
>   */
> +#ifdef CONFIG_MEMORY_FAILURE
> +void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
>  void shake_folio(struct folio *folio);
>  extern int hwpoison_filter(struct page *p);
>
> @@ -1068,6 +1070,12 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
>  		unsigned long ksm_addr);
>  unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
>
> +#else
> +static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
> +{
> +}
> +#endif
> +
>  extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
>  		unsigned long, unsigned long,
>  		unsigned long, unsigned long);
>
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 353254537b54..67b6b259a75d 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1554,6 +1554,31 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
>  	return ret;
>  }
>
> +void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
> +{
> +	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
> +		struct address_space *mapping;

It might be better to have an empty line here.

> +		/*
> +		 * For hugetlb folios in shared mappings, try_to_unmap
> +		 * could potentially call huge_pmd_unshare. Because of
> +		 * this, take semaphore in write mode here and set
> +		 * TTU_RMAP_LOCKED to indicate we have taken the lock
> +		 * at this higher level.
> +		 */
> +		mapping = hugetlb_folio_mapping_lock_write(folio);
> +		if (!mapping) {
> +			pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
> +				 folio_pfn(folio));
> +			return;
> +		}
> +

Acked-by: Miaohe Lin <linmiaohe@huawei.com>

Thanks.
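For context, the commit message above anticipates reuse of the helper by do_migrate_range() in the memory hotplug path. That follow-up patch is not part of this message, so the sketch below is only a guess at what the call site could look like; the hwpoison checks, the TTU_IGNORE_MLOCK flag, and the put_folio label are assumptions, not the posted series:

	/*
	 * Hypothetical sketch of the planned reuse in do_migrate_range()
	 * (mm/memory_hotplug.c) -- illustrative only, not the posted code.
	 */
	if (folio_test_hwpoison(folio) ||
	    (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
		/* Poisoned folios are unmapped rather than migrated. */
		if (folio_mapped(folio))
			unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
		goto put_folio;	/* assumed cleanup label in the caller */
	}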
diff --git a/mm/internal.h b/mm/internal.h
index 5e6f2abcea28..b00ea4595d18 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1048,6 +1048,8 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
 /*
  * mm/memory-failure.c
  */
+#ifdef CONFIG_MEMORY_FAILURE
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
 void shake_folio(struct folio *folio);
 extern int hwpoison_filter(struct page *p);
 
@@ -1068,6 +1070,12 @@ void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
 		unsigned long ksm_addr);
 unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
+#else
+static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+}
+#endif
+
 extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
 		unsigned long, unsigned long,
 		unsigned long, unsigned long);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 353254537b54..67b6b259a75d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1554,6 +1554,31 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 	return ret;
 }
 
+void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
+{
+	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
+		struct address_space *mapping;
+		/*
+		 * For hugetlb folios in shared mappings, try_to_unmap
+		 * could potentially call huge_pmd_unshare. Because of
+		 * this, take semaphore in write mode here and set
+		 * TTU_RMAP_LOCKED to indicate we have taken the lock
+		 * at this higher level.
+		 */
+		mapping = hugetlb_folio_mapping_lock_write(folio);
+		if (!mapping) {
+			pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n",
+				 folio_pfn(folio));
+			return;
+		}
+
+		try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
+	} else {
+		try_to_unmap(folio, ttu);
+	}
+}
+
 /*
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
@@ -1615,23 +1640,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
 	 */
 	collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
 
-	if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
-		/*
-		 * For hugetlb pages in shared mappings, try_to_unmap
-		 * could potentially call huge_pmd_unshare. Because of
-		 * this, take semaphore in write mode here and set
-		 * TTU_RMAP_LOCKED to indicate we have taken the lock
-		 * at this higher level.
-		 */
-		mapping = hugetlb_folio_mapping_lock_write(folio);
-		if (mapping) {
-			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
-			i_mmap_unlock_write(mapping);
-		} else
-			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
-	} else {
-		try_to_unmap(folio, ttu);
-	}
+	unmap_poisoned_folio(folio, ttu);
 
 	unmap_success = !folio_mapped(folio);
 	if (!unmap_success)
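A design note on the !CONFIG_MEMORY_FAILURE stub above: because it is an empty static inline, callers outside mm/memory-failure.c can invoke the helper unconditionally and the compiler elides the call, so no #ifdef is needed at the call site. A minimal, hypothetical caller to illustrate (the function name is made up for this example):

	/* Hypothetical caller; builds with or without CONFIG_MEMORY_FAILURE. */
	static void unmap_if_poisoned(struct folio *folio)
	{
		/* With CONFIG_MEMORY_FAILURE=n this expands to a no-op stub. */
		if (folio_test_hwpoison(folio) && folio_mapped(folio))
			unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK);
	}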