Message ID | 20231204142146.91437-16-david@redhat.com |
---|---|
State | New |
Series | mm/rmap: interface overhaul |
On 04/12/2023 14:21, David Hildenbrand wrote:
> Let's use folio_add_anon_rmap_ptes(), batching the rmap operations.
>
> While at it, use more folio operations (but only in the code branch we're
> touching), use VM_WARN_ON_FOLIO(), and pass RMAP_COMPOUND instead of

You mean RMAP_EXCLUSIVE?

> manually setting PageAnonExclusive.
>
> We should never see non-anon pages on that branch: otherwise, the
> existing page_add_anon_rmap() call would have been flawed already.
>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  mm/huge_memory.c | 23 +++++++++++++++--------
>  1 file changed, 15 insertions(+), 8 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index cb33c6e0404cf..2c037ab3f4916 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2099,6 +2099,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  		unsigned long haddr, bool freeze)
>  {
>  	struct mm_struct *mm = vma->vm_mm;
> +	struct folio *folio;
>  	struct page *page;
>  	pgtable_t pgtable;
>  	pmd_t old_pmd, _pmd;
> @@ -2194,16 +2195,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  		uffd_wp = pmd_swp_uffd_wp(old_pmd);
>  	} else {
>  		page = pmd_page(old_pmd);
> +		folio = page_folio(page);
>  		if (pmd_dirty(old_pmd)) {
>  			dirty = true;
> -			SetPageDirty(page);
> +			folio_set_dirty(folio);
>  		}
>  		write = pmd_write(old_pmd);
>  		young = pmd_young(old_pmd);
>  		soft_dirty = pmd_soft_dirty(old_pmd);
>  		uffd_wp = pmd_uffd_wp(old_pmd);
>
> -		VM_BUG_ON_PAGE(!page_count(page), page);
> +		VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
> +		VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
>
>  		/*
>  		 * Without "freeze", we'll simply split the PMD, propagating the
> @@ -2220,11 +2223,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  		 *
>  		 * See page_try_share_anon_rmap(): invalidate PMD first.
>  		 */
> -		anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
> +		anon_exclusive = PageAnonExclusive(page);
>  		if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
>  			freeze = false;
> -		if (!freeze)
> -			page_ref_add(page, HPAGE_PMD_NR - 1);
> +		if (!freeze) {
> +			rmap_t rmap_flags = RMAP_NONE;
> +
> +			folio_ref_add(folio, HPAGE_PMD_NR - 1);
> +			if (anon_exclusive)
> +				rmap_flags = RMAP_EXCLUSIVE;

nit: I'd be inclined to make this |= since you're accumulating optional flags.
Yes, it's the only one so it still works as is...

> +			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
> +						 vma, haddr, rmap_flags);
> +		}
>  	}
>
>  	/*
> @@ -2267,8 +2277,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  		entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
>  		if (write)
>  			entry = pte_mkwrite(entry, vma);
> -		if (anon_exclusive)
> -			SetPageAnonExclusive(page + i);
>  		if (!young)
>  			entry = pte_mkold(entry);
>  		/* NOTE: this may set soft-dirty too on some archs */
> @@ -2278,7 +2286,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>  			entry = pte_mksoft_dirty(entry);
>  		if (uffd_wp)
>  			entry = pte_mkuffd_wp(entry);
> -		page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
>  	}
>  	VM_BUG_ON(!pte_none(ptep_get(pte)));
>  	set_pte_at(mm, addr, pte, entry);
On 05.12.23 13:22, Ryan Roberts wrote:
> On 04/12/2023 14:21, David Hildenbrand wrote:
>> Let's use folio_add_anon_rmap_ptes(), batching the rmap operations.
>>
>> While at it, use more folio operations (but only in the code branch we're
>> touching), use VM_WARN_ON_FOLIO(), and pass RMAP_COMPOUND instead of
>
> You mean RMAP_EXCLUSIVE?

Indeed.

[...]

>> -		if (!freeze)
>> -			page_ref_add(page, HPAGE_PMD_NR - 1);
>> +		if (!freeze) {
>> +			rmap_t rmap_flags = RMAP_NONE;
>> +
>> +			folio_ref_add(folio, HPAGE_PMD_NR - 1);
>> +			if (anon_exclusive)
>> +				rmap_flags = RMAP_EXCLUSIVE;
>
> nit: I'd be inclined to make this |= since you're accumulating optional flags.
> Yes, it's the only one so it still works as is...

Makes sense!
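To illustrate the style being suggested: starting from an empty flag set and OR-ing in each conditional flag means later additions compose without clobbering earlier ones. A minimal stand-alone sketch of that pattern follows; the enum values are stand-ins for the kernel's rmap_t flags, not the real definitions.

#include <stdio.h>

/* Stand-ins for the kernel's rmap_t flag values, for illustration only. */
enum {
	RMAP_NONE	= 0,
	RMAP_EXCLUSIVE	= 1 << 0,	/* currently the only optional flag */
};

int main(void)
{
	int anon_exclusive = 1;			/* pretend the mapping is exclusive */
	unsigned int rmap_flags = RMAP_NONE;

	if (anon_exclusive)
		rmap_flags |= RMAP_EXCLUSIVE;	/* |= accumulates; a second
						   conditional flag would simply
						   OR in as well */

	printf("rmap_flags = 0x%x\n", rmap_flags);
	return 0;
}

With a single optional flag, = and |= behave identically; the |= form just keeps the assignment correct if another conditional flag is added later.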
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cb33c6e0404cf..2c037ab3f4916 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2099,6 +2099,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long haddr, bool freeze)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct folio *folio;
 	struct page *page;
 	pgtable_t pgtable;
 	pmd_t old_pmd, _pmd;
@@ -2194,16 +2195,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		uffd_wp = pmd_swp_uffd_wp(old_pmd);
 	} else {
 		page = pmd_page(old_pmd);
+		folio = page_folio(page);
 		if (pmd_dirty(old_pmd)) {
 			dirty = true;
-			SetPageDirty(page);
+			folio_set_dirty(folio);
 		}
 		write = pmd_write(old_pmd);
 		young = pmd_young(old_pmd);
 		soft_dirty = pmd_soft_dirty(old_pmd);
 		uffd_wp = pmd_uffd_wp(old_pmd);

-		VM_BUG_ON_PAGE(!page_count(page), page);
+		VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
+		VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

 		/*
 		 * Without "freeze", we'll simply split the PMD, propagating the
@@ -2220,11 +2223,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		 *
 		 * See page_try_share_anon_rmap(): invalidate PMD first.
 		 */
-		anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+		anon_exclusive = PageAnonExclusive(page);
 		if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
 			freeze = false;
-		if (!freeze)
-			page_ref_add(page, HPAGE_PMD_NR - 1);
+		if (!freeze) {
+			rmap_t rmap_flags = RMAP_NONE;
+
+			folio_ref_add(folio, HPAGE_PMD_NR - 1);
+			if (anon_exclusive)
+				rmap_flags = RMAP_EXCLUSIVE;
+			folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
+						 vma, haddr, rmap_flags);
+		}
 	}

 	/*
@@ -2267,8 +2277,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
 		if (write)
 			entry = pte_mkwrite(entry, vma);
-		if (anon_exclusive)
-			SetPageAnonExclusive(page + i);
 		if (!young)
 			entry = pte_mkold(entry);
 		/* NOTE: this may set soft-dirty too on some archs */
@@ -2278,7 +2286,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			entry = pte_mksoft_dirty(entry);
 		if (uffd_wp)
 			entry = pte_mkuffd_wp(entry);
-		page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
 	}
 	VM_BUG_ON(!pte_none(ptep_get(pte)));
 	set_pte_at(mm, addr, pte, entry);
Let's use folio_add_anon_rmap_ptes(), batching the rmap operations.

While at it, use more folio operations (but only in the code branch we're
touching), use VM_WARN_ON_FOLIO(), and pass RMAP_COMPOUND instead of
manually setting PageAnonExclusive.

We should never see non-anon pages on that branch: otherwise, the
existing page_add_anon_rmap() call would have been flawed already.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/huge_memory.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)
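The shape of the change is that the split path now makes one rmap call covering all HPAGE_PMD_NR subpages and passes exclusivity as a flag, instead of calling page_add_anon_rmap() and SetPageAnonExclusive() once per subpage. A rough, hypothetical user-space analogue of that before/after shape is sketched below; none of these names are kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define NR_SUBPAGES 512	/* stands in for HPAGE_PMD_NR (x86-64, 4 KiB pages) */

struct fake_page {
	int mapcount;
	bool anon_exclusive;
};

/* Old shape: one call per subpage, exclusivity set separately for each page. */
static void add_rmap_one(struct fake_page *p, bool exclusive)
{
	p->mapcount++;
	if (exclusive)
		p->anon_exclusive = true;
}

/* New shape: one call covers the whole range, exclusivity passed as a flag. */
static void add_rmap_batch(struct fake_page *pages, int nr, bool exclusive)
{
	for (int i = 0; i < nr; i++) {
		pages[i].mapcount++;
		pages[i].anon_exclusive = exclusive;
	}
}

int main(void)
{
	static struct fake_page old_way[NR_SUBPAGES], new_way[NR_SUBPAGES];

	for (int i = 0; i < NR_SUBPAGES; i++)		/* old: NR_SUBPAGES calls */
		add_rmap_one(&old_way[i], true);

	add_rmap_batch(new_way, NR_SUBPAGES, true);	/* new: a single call */

	printf("old_way[0]: mapcount=%d exclusive=%d\n",
	       old_way[0].mapcount, old_way[0].anon_exclusive);
	printf("new_way[0]: mapcount=%d exclusive=%d\n",
	       new_way[0].mapcount, new_way[0].anon_exclusive);
	return 0;
}

In the kernel, the batched helper can also do its bookkeeping once per batch rather than once per page, which is where the practical benefit of batching the rmap operations comes from.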