Message ID | 20231204142146.91437-9-david@redhat.com (mailing list archive)
State | New |
Series | mm/rmap: interface overhaul
On 12/4/2023 10:21 PM, David Hildenbrand wrote:
> Let's convert insert_page_into_pte_locked() and do_set_pmd(). While at it,
> perform some folio conversion.
>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>

Yes. I made sure to add my RB tag this time. :).

Regards
Yin, Fengwei

> ---
>  mm/memory.c | 14 ++++++++------
>  1 file changed, 8 insertions(+), 6 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 15325587cff01..be7fe58f7c297 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1845,12 +1845,14 @@ static int validate_page_before_insert(struct page *page)
>  static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
>  			unsigned long addr, struct page *page, pgprot_t prot)
>  {
> +	struct folio *folio = page_folio(page);
> +
>  	if (!pte_none(ptep_get(pte)))
>  		return -EBUSY;
>  	/* Ok, finally just insert the thing.. */
> -	get_page(page);
> +	folio_get(folio);
>  	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
> -	page_add_file_rmap(page, vma, false);
> +	folio_add_file_rmap_pte(folio, page, vma);
>  	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
>  	return 0;
>  }
> @@ -4308,6 +4310,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
>  
>  vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>  {
> +	struct folio *folio = page_folio(page);
>  	struct vm_area_struct *vma = vmf->vma;
>  	bool write = vmf->flags & FAULT_FLAG_WRITE;
>  	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
> @@ -4317,8 +4320,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>  	if (!transhuge_vma_suitable(vma, haddr))
>  		return ret;
>  
> -	page = compound_head(page);
> -	if (compound_order(page) != HPAGE_PMD_ORDER)
> +	if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
>  		return ret;
>  
>  	/*
> @@ -4327,7 +4329,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>  	 * check. This kind of THP just can be PTE mapped. Access to
>  	 * the corrupted subpage should trigger SIGBUS as expected.
>  	 */
> -	if (unlikely(PageHasHWPoisoned(page)))
> +	if (unlikely(folio_test_has_hwpoisoned(folio)))
>  		return ret;
>  
>  	/*
> @@ -4351,7 +4353,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
>  	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
>  
>  	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
> -	page_add_file_rmap(page, vma, true);
> +	folio_add_file_rmap_pmd(folio, page, vma);
>  
>  	/*
>  	 * deposit and withdraw with pmd lock held
diff --git a/mm/memory.c b/mm/memory.c
index 15325587cff01..be7fe58f7c297 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1845,12 +1845,14 @@ static int validate_page_before_insert(struct page *page)
 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
+	struct folio *folio = page_folio(page);
+
 	if (!pte_none(ptep_get(pte)))
 		return -EBUSY;
 	/* Ok, finally just insert the thing.. */
-	get_page(page);
+	folio_get(folio);
 	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-	page_add_file_rmap(page, vma, false);
+	folio_add_file_rmap_pte(folio, page, vma);
 	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
 	return 0;
 }
@@ -4308,6 +4310,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = vmf->vma;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
@@ -4317,8 +4320,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	if (!transhuge_vma_suitable(vma, haddr))
 		return ret;
 
-	page = compound_head(page);
-	if (compound_order(page) != HPAGE_PMD_ORDER)
+	if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
 		return ret;
 
 	/*
@@ -4327,7 +4329,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	 * check. This kind of THP just can be PTE mapped. Access to
 	 * the corrupted subpage should trigger SIGBUS as expected.
 	 */
-	if (unlikely(PageHasHWPoisoned(page)))
+	if (unlikely(folio_test_has_hwpoisoned(folio)))
 		return ret;
 
 	/*
@@ -4351,7 +4353,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 
 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
-	page_add_file_rmap(page, vma, true);
+	folio_add_file_rmap_pmd(folio, page, vma);
 
 	/*
 	 * deposit and withdraw with pmd lock held
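For readers who prefer the end state over the hunks, this is how the PTE insertion path reads with the patch applied. It is a sketch assembled from the first hunk above; the unchanged lines come from the diff context, so treat it as an approximation of the surrounding mm/memory.c code rather than an authoritative copy:

static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
			unsigned long addr, struct page *page, pgprot_t prot)
{
	/* Resolve the folio once; reference counting and rmap are folio-based now. */
	struct folio *folio = page_folio(page);

	if (!pte_none(ptep_get(pte)))
		return -EBUSY;
	/* Ok, finally just insert the thing.. */
	folio_get(folio);
	inc_mm_counter(vma->vm_mm, mm_counter_file(page));
	folio_add_file_rmap_pte(folio, page, vma);
	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
	return 0;
}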
Let's convert insert_page_into_pte_locked() and do_set_pmd(). While at it,
perform some folio conversion.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/memory.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
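At the call sites, the conversion boils down to the following pattern; this is a summary sketch of the hunks above, not a verbatim excerpt from the tree:

	/* Before: one helper, a bool selects PTE- vs. PMD-level mapping. */
	page_add_file_rmap(page, vma, false);	/* map page at one PTE */
	page_add_file_rmap(page, vma, true);	/* map THP at one PMD  */

	/* After: folio-based helpers, the granularity is explicit in the name. */
	folio_add_file_rmap_pte(folio, page, vma);
	folio_add_file_rmap_pmd(folio, page, vma);

The folio is resolved once via page_folio(page), which also lets do_set_pmd() replace the compound_head()/compound_order() checks with a direct folio_order() comparison.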