
[4/4] mm: swap: entirely map large folios found in swapcache

Message ID 20240402073237.240995-5-21cnbao@gmail.com (mailing list archive)
State New
Series large folios swap-in: handle refault cases first

Commit Message

Barry Song April 2, 2024, 7:32 a.m. UTC
From: Chuanhua Han <hanchuanhua@oppo.com>

When a large folio is found in the swapcache, the current implementation
requires calling do_swap_page() nr_pages times, resulting in nr_pages
page faults. This patch opts to map the entire large folio at once to
minimize page faults. Additionally, redundant checks and early exits
for ARM64 MTE restoring are removed.

Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
Co-developed-by: Barry Song <v-songbaohua@oppo.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 mm/memory.c | 61 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 49 insertions(+), 12 deletions(-)
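
For context, a minimal userspace sketch of the refault pattern this patch
targets (illustrative only; it assumes 64KiB anonymous mTHP is enabled,
swap is configured, MADV_PAGEOUT is available, the mapping happens to be
suitably aligned for the mTHP size, and the reclaimed large folio is found
in the swapcache again when refaulted, none of which is guaranteed on
every system):

#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 1024;			/* one 64KiB mTHP-sized region */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);			/* populate, ideally as one large folio */
	madvise(buf, len, MADV_PAGEOUT);	/* reclaim the region to swap */

	/*
	 * Refault: without this patch every 4KiB sub-page can take its own
	 * page fault; with it, a single fault can map the whole large folio
	 * found in the swapcache.
	 */
	for (size_t i = 0; i < len; i += 4096)
		buf[i]++;

	munmap(buf, len);
	return 0;
}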

Comments

Barry Song April 7, 2024, 2:24 a.m. UTC | #1
On Tue, Apr 2, 2024 at 8:33 PM Barry Song <21cnbao@gmail.com> wrote:
>
> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> When a large folio is found in the swapcache, the current implementation
> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> page faults. This patch opts to map the entire large folio at once to
> minimize page faults. Additionally, redundant checks and early exits
> for ARM64 MTE restoring are removed.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> ---
>  mm/memory.c | 61 ++++++++++++++++++++++++++++++++++++++++++-----------
>  1 file changed, 49 insertions(+), 12 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 0a80e75af22c..5f52db6eb494 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3941,6 +3941,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         pte_t pte;
>         vm_fault_t ret = 0;
>         void *shadow = NULL;
> +       int nr_pages = 1;
> +       unsigned long start_address = vmf->address;
> +       pte_t *start_pte = vmf->pte;
> +       bool any_swap_shared = false;
>
>         if (!pte_unmap_same(vmf))
>                 goto out;
> @@ -4131,6 +4135,30 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          */
>         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
>                         &vmf->ptl);
> +
> +       /* We hit large folios in swapcache */
> +       if (start_pte && folio_test_large(folio) && folio_test_swapcache(folio)) {
> +               unsigned long folio_start = vmf->address - folio_page_idx(folio, page) * PAGE_SIZE;
> +               unsigned long folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
> +               pte_t *folio_pte = vmf->pte - folio_page_idx(folio, page);
> +               int nr = folio_nr_pages(folio);
> +
> +               if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
> +                       goto check_pte;
> +               if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
> +                       goto check_pte;
> +
> +               if (swap_pte_batch(folio_pte, nr, folio->swap, &any_swap_shared) != nr)
> +                       goto check_pte;
> +
> +               start_address = folio_start;
> +               start_pte = folio_pte;
> +               nr_pages = nr;
> +               entry = folio->swap;
> +               page = &folio->page;
> +       }
> +
> +check_pte:
>         if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
>                 goto out_nomap;
>
> @@ -4184,6 +4212,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>                          */
>                         exclusive = false;
>                 }
> +
> +               /* Reuse the whole large folio iff all entries are exclusive */
> +               if (nr_pages > 1 && any_swap_shared)
> +                       exclusive = false;
>         }
>
>         /*
> @@ -4198,12 +4230,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          * We're already holding a reference on the page but haven't mapped it
>          * yet.
>          */
> -       swap_free(entry);
> +       swap_free_nr(entry, nr_pages);
>         if (should_try_to_free_swap(folio, vma, vmf->flags))
>                 folio_free_swap(folio);
>
> -       inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> -       dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> +       folio_ref_add(folio, nr_pages - 1);
> +       add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> +       add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> +
>         pte = mk_pte(page, vma->vm_page_prot);
>
>         /*
> @@ -4213,33 +4247,36 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          * exclusivity.
>          */
>         if (!folio_test_ksm(folio) &&
> -           (exclusive || folio_ref_count(folio) == 1)) {
> +           (exclusive || (folio_ref_count(folio) == nr_pages &&
> +                          folio_nr_pages(folio) == nr_pages))) {
>                 if (vmf->flags & FAULT_FLAG_WRITE) {
>                         pte = maybe_mkwrite(pte_mkdirty(pte), vma);
>                         vmf->flags &= ~FAULT_FLAG_WRITE;
>                 }
>                 rmap_flags |= RMAP_EXCLUSIVE;
>         }
> -       flush_icache_page(vma, page);
> +       flush_icache_pages(vma, page, nr_pages);
>         if (pte_swp_soft_dirty(vmf->orig_pte))
>                 pte = pte_mksoft_dirty(pte);
>         if (pte_swp_uffd_wp(vmf->orig_pte))
>                 pte = pte_mkuffd_wp(pte);
> -       vmf->orig_pte = pte;
>
>         /* ksm created a completely new copy */
>         if (unlikely(folio != swapcache && swapcache)) {
> -               folio_add_new_anon_rmap(folio, vma, vmf->address);
> +               folio_add_new_anon_rmap(folio, vma, start_address);
>                 folio_add_lru_vma(folio, vma);
> +       } else if (!folio_test_anon(folio)) {
> +               folio_add_new_anon_rmap(folio, vma, start_address);

The above two lines of code should be removed: this patchset only
addresses refault cases of large folios, so the folios we handle here are
always already anonymous. However, as we prepare to address non-refault
cases of large-folio swap-in, per David's suggestion in a separate
thread, we'll need to add a wrapper such as
folio_add_shared_new_anon_rmap() to accommodate non-exclusive new
anonymous folios[1].

[1] https://lore.kernel.org/linux-mm/CAGsJ_4xKTj1PwmJAAZAzAvEN53kze5wSPHb01pVg9LBy80axGA@mail.gmail.com/
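
For illustration, a rough sketch of how that call site might then look
(hypothetical only: folio_add_shared_new_anon_rmap() does not exist yet,
and its name and signature are assumptions pending the discussion in [1]):

	} else if (!folio_test_anon(folio)) {
		/*
		 * The folio is new to the anon rmap.  If any of the batched
		 * swap entries is still shared, the sub-pages must not be
		 * marked anon-exclusive, hence a "shared" variant of the
		 * helper (hypothetical name and signature).
		 */
		if (any_swap_shared)
			folio_add_shared_new_anon_rmap(folio, vma, start_address);
		else
			folio_add_new_anon_rmap(folio, vma, start_address);
	}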

>         } else {
> -               folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
> -                                       rmap_flags);
> +               folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, start_address,
> +                                        rmap_flags);
>         }
>
>         VM_BUG_ON(!folio_test_anon(folio) ||
>                         (pte_write(pte) && !PageAnonExclusive(page)));
> -       set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> -       arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
> +       set_ptes(vma->vm_mm, start_address, start_pte, pte, nr_pages);
> +       vmf->orig_pte = ptep_get(vmf->pte);
> +       arch_do_swap_page(vma->vm_mm, vma, start_address, pte, pte);
>
>         folio_unlock(folio);
>         if (folio != swapcache && swapcache) {
> @@ -4263,7 +4300,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         }
>
>         /* No need to invalidate - it was non-present before */
> -       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
> +       update_mmu_cache_range(vmf, vma, start_address, start_pte, nr_pages);
>  unlock:
>         if (vmf->pte)
>                 pte_unmap_unlock(vmf->pte, vmf->ptl);
> --
> 2.34.1
>

Thanks
Barry
Huang, Ying April 8, 2024, 7:18 a.m. UTC | #2
Barry Song <21cnbao@gmail.com> writes:

> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> When a large folio is found in the swapcache, the current implementation
> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> page faults. This patch opts to map the entire large folio at once to
> minimize page faults. Additionally, redundant checks and early exits
> for ARM64 MTE restoring are removed.

For large folios in reclaiming, it makes sense to restore all PTE
mappings to the large folio to reduce the number of page faults.

But for large folios swapped in, I think it's better to map only the
PTE that triggered the page fault.  That gives us the opportunity to
trap accesses to the sub-pages of the large folio that was swapped in
ahead of demand (a kind of swap readahead).  Then we can decide the
order of large folio swap-in based on the readahead window information.
That is, we may need to check PageReadahead() to decide whether to map
all PTEs in the future.
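
For illustration, a rough sketch of that idea (not part of this patch; it
assumes the fault handler can consult folio_test_readahead() at this
point, and it reuses nr_pages/folio from the hunk above):

	/*
	 * Illustrative sketch only: decide how many PTEs to map based on
	 * whether the folio was brought in by swap readahead rather than
	 * by an earlier fault on it.
	 */
	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
		if (folio_test_readahead(folio))
			/*
			 * Swapped in ahead of demand: map a single PTE so
			 * that the next access to another sub-page still
			 * faults and can feed the readahead window.
			 */
			nr_pages = 1;
		else
			/* Refault of a folio reclaimed as a whole: batch-map. */
			nr_pages = folio_nr_pages(folio);
	}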

--
Best Regards,
Huang, Ying
Barry Song April 8, 2024, 7:27 a.m. UTC | #3
On Mon, Apr 8, 2024 at 7:20 PM Huang, Ying <ying.huang@intel.com> wrote:
>
> Barry Song <21cnbao@gmail.com> writes:
>
> > From: Chuanhua Han <hanchuanhua@oppo.com>
> >
> > When a large folio is found in the swapcache, the current implementation
> > requires calling do_swap_page() nr_pages times, resulting in nr_pages
> > page faults. This patch opts to map the entire large folio at once to
> > minimize page faults. Additionally, redundant checks and early exits
> > for ARM64 MTE restoring are removed.
>
> For large folios in reclaiming, it makes sense to restore all PTE
> mappings to the large folio to reduce the number of page faults.
>

Indeed, this patch addresses the refault case first, which is much less
controversial :-)

> But for large folios swapped in, I think it's better to map only the
> PTE that triggered the page fault.  That gives us the opportunity to
> trap accesses to the sub-pages of the large folio that was swapped in
> ahead of demand (a kind of swap readahead).  Then we can decide the
> order of large folio swap-in based on the readahead window information.
> That is, we may need to check PageReadahead() to decide whether to map
> all PTEs in the future.

Another scenario occurs when a process opts to utilize large folios for
swap_readahead. Subsequently, another process encounters the large
folios introduced by the former process. In this case, would it be optimal
to fully map them just like the refault case?

>
> --
> Best Regards,
> Huang, Ying

Thanks
Barry
Huang, Ying April 8, 2024, 7:49 a.m. UTC | #4
Barry Song <21cnbao@gmail.com> writes:

> On Mon, Apr 8, 2024 at 7:20 PM Huang, Ying <ying.huang@intel.com> wrote:
>>
>> Barry Song <21cnbao@gmail.com> writes:
>>
>> > From: Chuanhua Han <hanchuanhua@oppo.com>
>> >
>> > When a large folio is found in the swapcache, the current implementation
>> > requires calling do_swap_page() nr_pages times, resulting in nr_pages
>> > page faults. This patch opts to map the entire large folio at once to
>> > minimize page faults. Additionally, redundant checks and early exits
>> > for ARM64 MTE restoring are removed.
>>
>> For large folios in reclaiming, it makes sense to restore all PTE
>> mappings to the large folio to reduce the number of page faults.
>>
>
> Indeed, this patch addresses the refault case first, which is much less
> controversial :-)
>
>> But for large folios swapped in, I think it's better to map only the
>> PTE that triggered the page fault.  That gives us the opportunity to
>> trap accesses to the sub-pages of the large folio that was swapped in
>> ahead of demand (a kind of swap readahead).  Then we can decide the
>> order of large folio swap-in based on the readahead window information.
>> That is, we may need to check PageReadahead() to decide whether to map
>> all PTEs in the future.
>
> Another scenario occurs when a process opts to utilize large folios for
> swap_readahead. Subsequently, another process encounters the large
> folios introduced by the former process. In this case, would it be optimal
> to fully map them just like the refault case?

We only need to trap the first access to a readahead sub-page.  So we
can map PTEs for all the sub-pages that do not have PageReadahead() set.

IIUC, the readahead flag is currently per-folio; we may need to change
it to per-sub-page when needed.

--
Best Regards,
Huang, Ying

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 0a80e75af22c..5f52db6eb494 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3941,6 +3941,10 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	pte_t pte;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
+	int nr_pages = 1;
+	unsigned long start_address = vmf->address;
+	pte_t *start_pte = vmf->pte;
+	bool any_swap_shared = false;
 
 	if (!pte_unmap_same(vmf))
 		goto out;
@@ -4131,6 +4135,30 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 */
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
+
+	/* We hit large folios in swapcache */
+	if (start_pte && folio_test_large(folio) && folio_test_swapcache(folio)) {
+		unsigned long folio_start = vmf->address - folio_page_idx(folio, page) * PAGE_SIZE;
+		unsigned long folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE;
+		pte_t *folio_pte = vmf->pte - folio_page_idx(folio, page);
+		int nr = folio_nr_pages(folio);
+
+		if (unlikely(folio_start < max(vmf->address & PMD_MASK, vma->vm_start)))
+			goto check_pte;
+		if (unlikely(folio_end > pmd_addr_end(vmf->address, vma->vm_end)))
+			goto check_pte;
+
+		if (swap_pte_batch(folio_pte, nr, folio->swap, &any_swap_shared) != nr)
+			goto check_pte;
+
+		start_address = folio_start;
+		start_pte = folio_pte;
+		nr_pages = nr;
+		entry = folio->swap;
+		page = &folio->page;
+	}
+
+check_pte:
 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
 		goto out_nomap;
 
@@ -4184,6 +4212,10 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 			 */
 			exclusive = false;
 		}
+
+		/* Reuse the whole large folio iff all entries are exclusive */
+		if (nr_pages > 1 && any_swap_shared)
+			exclusive = false;
 	}
 
 	/*
@@ -4198,12 +4230,14 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * We're already holding a reference on the page but haven't mapped it
 	 * yet.
 	 */
-	swap_free(entry);
+	swap_free_nr(entry, nr_pages);
 	if (should_try_to_free_swap(folio, vma, vmf->flags))
 		folio_free_swap(folio);
 
-	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+	folio_ref_add(folio, nr_pages - 1);
+	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
+
 	pte = mk_pte(page, vma->vm_page_prot);
 
 	/*
@@ -4213,33 +4247,36 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * exclusivity.
 	 */
 	if (!folio_test_ksm(folio) &&
-	    (exclusive || folio_ref_count(folio) == 1)) {
+	    (exclusive || (folio_ref_count(folio) == nr_pages &&
+			   folio_nr_pages(folio) == nr_pages))) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
 			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 			vmf->flags &= ~FAULT_FLAG_WRITE;
 		}
 		rmap_flags |= RMAP_EXCLUSIVE;
 	}
-	flush_icache_page(vma, page);
+	flush_icache_pages(vma, page, nr_pages);
 	if (pte_swp_soft_dirty(vmf->orig_pte))
 		pte = pte_mksoft_dirty(pte);
 	if (pte_swp_uffd_wp(vmf->orig_pte))
 		pte = pte_mkuffd_wp(pte);
-	vmf->orig_pte = pte;
 
 	/* ksm created a completely new copy */
 	if (unlikely(folio != swapcache && swapcache)) {
-		folio_add_new_anon_rmap(folio, vma, vmf->address);
+		folio_add_new_anon_rmap(folio, vma, start_address);
 		folio_add_lru_vma(folio, vma);
+	} else if (!folio_test_anon(folio)) {
+		folio_add_new_anon_rmap(folio, vma, start_address);
 	} else {
-		folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
-					rmap_flags);
+		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, start_address,
+					 rmap_flags);
 	}
 
 	VM_BUG_ON(!folio_test_anon(folio) ||
 			(pte_write(pte) && !PageAnonExclusive(page)));
-	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
-	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
+	set_ptes(vma->vm_mm, start_address, start_pte, pte, nr_pages);
+	vmf->orig_pte = ptep_get(vmf->pte);
+	arch_do_swap_page(vma->vm_mm, vma, start_address, pte, pte);
 
 	folio_unlock(folio);
 	if (folio != swapcache && swapcache) {
@@ -4263,7 +4300,7 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+	update_mmu_cache_range(vmf, vma, start_address, start_pte, nr_pages);
 unlock:
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);