
[RFC,v2,5/5] mm: support large folios swapin as a whole

Message ID 20240229003753.134193-6-21cnbao@gmail.com (mailing list archive)
State New
Series mm: support large folios swap-in

Commit Message

Barry Song Feb. 29, 2024, 12:37 a.m. UTC
From: Chuanhua Han <hanchuanhua@oppo.com>

On an embedded system like Android, more than half of anonymous memory is
actually in swap devices such as zRAM. For example, while an app is switched
to the background, most of its memory might be swapped out.

Now we have mTHP features, but unfortunately, if we don't support large folio
swap-in, once those large folios are swapped out, we immediately lose the
performance gain we can get through large folios and hardware optimizations
such as CONT-PTE.

This patch brings up mTHP swap-in support. Right now, we limit mTHP swap-in
to those contiguous swaps which were likely swapped out from an mTHP as a whole.

On the other hand, the current implementation only covers the SWAP_SYNCHRONOUS
case. It doesn't support large folios for swapin_readahead yet.

Right now, we re-fault large folios which are still in the swapcache as a
whole. This effectively reduces the extra loops and early exits that we added
in arch_swap_restore() when supporting MTE restore for folios rather than
pages. It also reduces the cost of do_swap_page(), as PTEs used to be set one
by one even when we hit a large folio in the swapcache.

Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
Co-developed-by: Barry Song <v-songbaohua@oppo.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 mm/memory.c | 191 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 157 insertions(+), 34 deletions(-)

Comments

Barry Song March 4, 2024, 1:34 a.m. UTC | #1
On Thu, Feb 29, 2024 at 1:39 PM Barry Song <21cnbao@gmail.com> wrote:
>
> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> On an embedded system like Android, more than half of anonymous memory is
> actually in swap devices such as zRAM. For example, while an app is switched
> to the background, most of its memory might be swapped out.
>
> Now we have mTHP features, but unfortunately, if we don't support large folio
> swap-in, once those large folios are swapped out, we immediately lose the
> performance gain we can get through large folios and hardware optimizations
> such as CONT-PTE.
>
> This patch brings up mTHP swap-in support. Right now, we limit mTHP swap-in
> to those contiguous swaps which were likely swapped out from an mTHP as a whole.
>
> On the other hand, the current implementation only covers the SWAP_SYNCHRONOUS
> case. It doesn't support large folios for swapin_readahead yet.
>
> Right now, we re-fault large folios which are still in the swapcache as a
> whole. This effectively reduces the extra loops and early exits that we added
> in arch_swap_restore() when supporting MTE restore for folios rather than
> pages. It also reduces the cost of do_swap_page(), as PTEs used to be set one
> by one even when we hit a large folio in the swapcache.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> ---
>  mm/memory.c | 191 ++++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 157 insertions(+), 34 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 90b08b7cbaac..471689ce4e91 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -104,9 +104,16 @@ struct page *mem_map;
>  EXPORT_SYMBOL(mem_map);
>  #endif
>
> +/* A choice of behaviors for alloc_anon_folio() */
> +enum behavior {
> +       DO_SWAP_PAGE,
> +       DO_ANON_PAGE,
> +};
> +
>  static vm_fault_t do_fault(struct vm_fault *vmf);
>  static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
>  static bool vmf_pte_changed(struct vm_fault *vmf);
> +static struct folio *alloc_anon_folio(struct vm_fault *vmf, enum behavior behavior);
>
>  /*
>   * Return true if the original pte was a uffd-wp pte marker (so the pte was
> @@ -3974,6 +3981,52 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
>         return VM_FAULT_SIGBUS;
>  }
>
> +/*
> + * Check whether a range of PTEs is entirely swap entries with
> + * contiguous swap offsets and the same SWAP_HAS_CACHE status.
> + * pte must be the first PTE in the range.
> + */
> +static bool is_pte_range_contig_swap(pte_t *pte, int nr_pages)
> +{
> +       int i;
> +       struct swap_info_struct *si;
> +       swp_entry_t entry;
> +       unsigned type;
> +       pgoff_t start_offset;
> +       char has_cache;
> +
> +       entry = pte_to_swp_entry(ptep_get_lockless(pte));
> +       if (non_swap_entry(entry))
> +               return false;
> +       start_offset = swp_offset(entry);
> +       if (start_offset % nr_pages)
> +               return false;
> +
> +       si = swp_swap_info(entry);
> +       type = swp_type(entry);
> +       has_cache = si->swap_map[start_offset] & SWAP_HAS_CACHE;
> +       for (i = 1; i < nr_pages; i++) {
> +               entry = pte_to_swp_entry(ptep_get_lockless(pte + i));
> +               if (non_swap_entry(entry))
> +                       return false;
> +               if (swp_offset(entry) != start_offset + i)
> +                       return false;
> +               if (swp_type(entry) != type)
> +                       return false;
> +               /*
> +                * While allocating a large folio and doing swap_read_folio()
> +                * for the SWP_SYNCHRONOUS_IO path, the faulting pte has no
> +                * swapcache. We need to ensure all the other PTEs have no
> +                * cache either; otherwise, we might read from swap devices
> +                * while the content is actually in the swapcache.
> +                */
> +               if ((si->swap_map[start_offset + i] & SWAP_HAS_CACHE) != has_cache)
> +                       return false;
> +       }
> +
> +       return true;
> +}
> +
>  /*
>   * We enter with non-exclusive mmap_lock (to exclude vma changes,
>   * but allow concurrent faults), and pte mapped but not yet locked.
> @@ -3995,6 +4048,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         pte_t pte;
>         vm_fault_t ret = 0;
>         void *shadow = NULL;
> +       int nr_pages = 1;
> +       unsigned long start_address;
> +       pte_t *start_pte;
>
>         if (!pte_unmap_same(vmf))
>                 goto out;
> @@ -4058,28 +4114,32 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         if (!folio) {
>                 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
>                     __swap_count(entry) == 1) {
> -                       /*
> -                        * Prevent parallel swapin from proceeding with
> -                        * the cache flag. Otherwise, another thread may
> -                        * finish swapin first, free the entry, and swapout
> -                        * reusing the same entry. It's undetectable as
> -                        * pte_same() returns true due to entry reuse.
> -                        */
> -                       if (swapcache_prepare(entry)) {
> -                               /* Relax a bit to prevent rapid repeated page faults */
> -                               schedule_timeout_uninterruptible(1);
> -                               goto out;
> -                       }
> -                       need_clear_cache = true;
> -
>                         /* skip swapcache */
> -                       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
> -                                               vma, vmf->address, false);
> +                       folio = alloc_anon_folio(vmf, DO_SWAP_PAGE);
>                         page = &folio->page;
>                         if (folio) {
>                                 __folio_set_locked(folio);
>                                 __folio_set_swapbacked(folio);
>
> +                               if (folio_test_large(folio)) {
> +                                       nr_pages = folio_nr_pages(folio);
> +                                       entry.val = ALIGN_DOWN(entry.val, nr_pages);
> +                               }
> +
> +                               /*
> +                                * Prevent parallel swapin from proceeding with
> +                                * the cache flag. Otherwise, another thread may
> +                                * finish swapin first, free the entry, and swapout
> +                                * reusing the same entry. It's undetectable as
> +                                * pte_same() returns true due to entry reuse.
> +                                */
> +                               if (swapcache_prepare_nr(entry, nr_pages)) {
> +                                       /* Relax a bit to prevent rapid repeated page faults */
> +                                       schedule_timeout_uninterruptible(1);
> +                                       goto out;
> +                               }
> +                               need_clear_cache = true;
> +
>                                 if (mem_cgroup_swapin_charge_folio(folio,
>                                                         vma->vm_mm, GFP_KERNEL,
>                                                         entry)) {
> @@ -4185,6 +4245,42 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          */
>         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
>                         &vmf->ptl);
> +
> +       start_address = vmf->address;
> +       start_pte = vmf->pte;
> +       if (folio_test_large(folio)) {
> +               unsigned long nr = folio_nr_pages(folio);
> +               unsigned long addr = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
> +               pte_t *aligned_pte = vmf->pte - (vmf->address - addr) / PAGE_SIZE;
> +
> +               /*
> +                * case 1: we are allocating a large folio; try to map it as a
> +                * whole iff the swap entries are still entirely mapped;
> +                * case 2: we hit a large folio in the swapcache, and all swap
> +                * entries are still entirely mapped; try to map it as a whole.
> +                * Otherwise, map only the faulting page within the large
> +                * folio which is in the swapcache.
> +                */
> +               if (!is_pte_range_contig_swap(aligned_pte, nr)) {
> +                       if (nr_pages > 1) /* ptes have changed for case 1 */
> +                               goto out_nomap;
> +                       goto check_pte;
> +               }
> +
> +               start_address = addr;
> +               start_pte = aligned_pte;
> +               /*
> +                * For case 1, the below has already been done
> +                * before swap_read_folio().
> +                */
> +               if (unlikely(folio == swapcache)) {
> +                       nr_pages = nr;
> +                       entry.val = ALIGN_DOWN(entry.val, nr_pages);
> +                       page = &folio->page;
> +               }
> +       }
> +
> +check_pte:
>         if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
>                 goto out_nomap;
>
> @@ -4252,12 +4348,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          * We're already holding a reference on the page but haven't mapped it
>          * yet.
>          */
> -       swap_free(entry);
> +       swap_nr_free(entry, nr_pages);
>         if (should_try_to_free_swap(folio, vma, vmf->flags))
>                 folio_free_swap(folio);
>
> -       inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> -       dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> +       folio_ref_add(folio, nr_pages - 1);
> +       add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> +       add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
> +
>         pte = mk_pte(page, vma->vm_page_prot);
>
>         /*
> @@ -4267,14 +4365,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          * exclusivity.
>          */
>         if (!folio_test_ksm(folio) &&
> -           (exclusive || folio_ref_count(folio) == 1)) {
> +           (exclusive || folio_ref_count(folio) == nr_pages)) {
>                 if (vmf->flags & FAULT_FLAG_WRITE) {
>                         pte = maybe_mkwrite(pte_mkdirty(pte), vma);
>                         vmf->flags &= ~FAULT_FLAG_WRITE;
>                 }
>                 rmap_flags |= RMAP_EXCLUSIVE;
>         }
> -       flush_icache_page(vma, page);
> +       flush_icache_pages(vma, page, nr_pages);
>         if (pte_swp_soft_dirty(vmf->orig_pte))
>                 pte = pte_mksoft_dirty(pte);
>         if (pte_swp_uffd_wp(vmf->orig_pte))
> @@ -4283,17 +4381,19 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>
>         /* ksm created a completely new copy */
>         if (unlikely(folio != swapcache && swapcache)) {
> -               folio_add_new_anon_rmap(folio, vma, vmf->address);
> +               folio_add_new_anon_rmap(folio, vma, start_address);
>                 folio_add_lru_vma(folio, vma);
> +       } else if (!folio_test_anon(folio)) {
> +               folio_add_new_anon_rmap(folio, vma, start_address);
>         } else {
> -               folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
> +               folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, start_address,
>                                         rmap_flags);
>         }
>
>         VM_BUG_ON(!folio_test_anon(folio) ||
>                         (pte_write(pte) && !PageAnonExclusive(page)));
> -       set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> -       arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
> +       set_ptes(vma->vm_mm, start_address, start_pte, pte, nr_pages);
> +       arch_do_swap_page(vma->vm_mm, vma, start_address, pte, vmf->orig_pte);
>
>         folio_unlock(folio);
>         if (folio != swapcache && swapcache) {
> @@ -4310,6 +4410,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         }
>
>         if (vmf->flags & FAULT_FLAG_WRITE) {
> +               if (nr_pages > 1)
> +                       vmf->orig_pte = ptep_get(vmf->pte);
> +
>                 ret |= do_wp_page(vmf);
>                 if (ret & VM_FAULT_ERROR)
>                         ret &= VM_FAULT_ERROR;
> @@ -4317,14 +4420,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         }
>
>         /* No need to invalidate - it was non-present before */
> -       update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
> +       update_mmu_cache_range(vmf, vma, start_address, start_pte, nr_pages);
>  unlock:
>         if (vmf->pte)
>                 pte_unmap_unlock(vmf->pte, vmf->ptl);
>  out:
>         /* Clear the swap cache pin for direct swapin after PTL unlock */
>         if (need_clear_cache)
> -               swapcache_clear(si, entry);
> +               swapcache_clear_nr(si, entry, nr_pages);
>         if (si)
>                 put_swap_device(si);
>         return ret;
> @@ -4340,7 +4443,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>                 folio_put(swapcache);
>         }
>         if (need_clear_cache)
> -               swapcache_clear(si, entry);
> +               swapcache_clear_nr(si, entry, nr_pages);
>         if (si)
>                 put_swap_device(si);
>         return ret;
> @@ -4358,7 +4461,7 @@ static bool pte_range_none(pte_t *pte, int nr_pages)
>         return true;
>  }
>
> -static struct folio *alloc_anon_folio(struct vm_fault *vmf)
> +static struct folio *alloc_anon_folio(struct vm_fault *vmf, enum behavior behavior)
>  {
>         struct vm_area_struct *vma = vmf->vma;
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> @@ -4376,6 +4479,19 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>         if (unlikely(userfaultfd_armed(vma)))
>                 goto fallback;
>
> +       /*
> +        * A large folio being swapped in could be partially in
> +        * zswap and partially in swap devices. As zswap doesn't
> +        * support large folios yet, we might get corrupted
> +        * zero-filled data by reading all subpages from swap
> +        * devices while some of them are actually in zswap.
> +        */
> +       if (behavior == DO_SWAP_PAGE && is_zswap_enabled())
> +               goto fallback;
> +
> +       if (unlikely(behavior != DO_ANON_PAGE && behavior != DO_SWAP_PAGE))
> +               return ERR_PTR(-EINVAL);
> +
>         /*
>          * Get a list of all the (large) orders below PMD_ORDER that are enabled
>          * for this vma. Then filter out the orders that can't be allocated over
> @@ -4393,15 +4509,22 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>                 return ERR_PTR(-EAGAIN);
>
>         /*
> -        * Find the highest order where the aligned range is completely
> -        * pte_none(). Note that all remaining orders will be completely
> +        * For do_anonymous_page, find the highest order where the aligned range is
> +        * completely pte_none(). Note that all remaining orders will be completely
>          * pte_none().
> +        * For do_swap_page, find the highest order where the aligned range is
> +        * completely swap entries with contiguous swap offsets.
>          */
>         order = highest_order(orders);
>         while (orders) {
>                 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
> -               if (pte_range_none(pte + pte_index(addr), 1 << order))
> -                       break;
> +               if (behavior == DO_ANON_PAGE) {
> +                       if (pte_range_none(pte + pte_index(addr), 1 << order))
> +                               break;
> +               } else  {
> +                       if (is_pte_range_contig_swap(pte + pte_index(addr), 1 << order))
> +                               break;
> +               }
>                 order = next_order(&orders, order);
>         }

We have a problem here: alloc_anon_folio() charges the folio in the loop below:
        /* Try allocating the highest of the remaining orders. */
        gfp = vma_thp_gfp_mask(vma);
        while (orders) {
                addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
                folio = vma_alloc_folio(gfp, order, vma, addr, true);
                if (folio) {
                        if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                                folio_put(folio);
                                goto next;
                        }
                        folio_throttle_swaprate(folio, gfp);
                        clear_huge_page(&folio->page, vmf->address, 1 << order);
                        return folio;
                }
next:
                order = next_order(&orders, order);
        }
This is necessary for DO_ANON_PAGE, but for DO_SWAP_PAGE it is wrong,
because in do_swap_page(), mem_cgroup_swapin_charge_folio() is done again:
                                if (mem_cgroup_swapin_charge_folio(folio,
                                                        vma->vm_mm, GFP_KERNEL,
                                                        entry)) {
                                        ret = VM_FAULT_OOM;
                                        goto out_page;
                                }

So in the do_swap_page() case, charging is done twice. I will get it fixed
in v3.

This is also true for folio_prealloc() at the end of alloc_anon_folio().
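
One possible shape for the fix, as a sketch only (it assumes the enum behavior
parameter from this patch; the actual v3 may differ): skip the charge and the
zeroing in alloc_anon_folio() for the swap-in path, since do_swap_page()
charges via mem_cgroup_swapin_charge_folio() and swap_read_folio() overwrites
the folio contents anyway:

        /* Try allocating the highest of the remaining orders. */
        gfp = vma_thp_gfp_mask(vma);
        while (orders) {
                addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
                folio = vma_alloc_folio(gfp, order, vma, addr, true);
                if (folio) {
                        /* do_swap_page() will charge the swapped-in folio */
                        if (behavior == DO_ANON_PAGE &&
                            mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                                folio_put(folio);
                                goto next;
                        }
                        folio_throttle_swaprate(folio, gfp);
                        /* swap_read_folio() fills the folio; skip zeroing */
                        if (behavior == DO_ANON_PAGE)
                                clear_huge_page(&folio->page, vmf->address, 1 << order);
                        return folio;
                }
next:
                order = next_order(&orders, order);
        }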

>
> @@ -4485,7 +4608,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>         if (unlikely(anon_vma_prepare(vma)))
>                 goto oom;
>         /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
> -       folio = alloc_anon_folio(vmf);
> +       folio = alloc_anon_folio(vmf, DO_ANON_PAGE);
>         if (IS_ERR(folio))
>                 return 0;
>         if (!folio)
> --
> 2.34.1
>

Thanks
Barry

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 90b08b7cbaac..471689ce4e91 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -104,9 +104,16 @@  struct page *mem_map;
 EXPORT_SYMBOL(mem_map);
 #endif
 
+/* A choice of behaviors for alloc_anon_folio() */
+enum behavior {
+	DO_SWAP_PAGE,
+	DO_ANON_PAGE,
+};
+
 static vm_fault_t do_fault(struct vm_fault *vmf);
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
 static bool vmf_pte_changed(struct vm_fault *vmf);
+static struct folio *alloc_anon_folio(struct vm_fault *vmf, enum behavior behavior);
 
 /*
  * Return true if the original pte was a uffd-wp pte marker (so the pte was
@@ -3974,6 +3981,52 @@  static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
+/*
+ * Check whether a range of PTEs is entirely swap entries with
+ * contiguous swap offsets and the same SWAP_HAS_CACHE status.
+ * pte must be the first PTE in the range.
+ */
+static bool is_pte_range_contig_swap(pte_t *pte, int nr_pages)
+{
+	int i;
+	struct swap_info_struct *si;
+	swp_entry_t entry;
+	unsigned type;
+	pgoff_t start_offset;
+	char has_cache;
+
+	entry = pte_to_swp_entry(ptep_get_lockless(pte));
+	if (non_swap_entry(entry))
+		return false;
+	start_offset = swp_offset(entry);
+	if (start_offset % nr_pages)
+		return false;
+
+	si = swp_swap_info(entry);
+	type = swp_type(entry);
+	has_cache = si->swap_map[start_offset] & SWAP_HAS_CACHE;
+	for (i = 1; i < nr_pages; i++) {
+		entry = pte_to_swp_entry(ptep_get_lockless(pte + i));
+		if (non_swap_entry(entry))
+			return false;
+		if (swp_offset(entry) != start_offset + i)
+			return false;
+		if (swp_type(entry) != type)
+			return false;
+		/*
+		 * While allocating a large folio and doing swap_read_folio()
+		 * for the SWP_SYNCHRONOUS_IO path, the faulting pte has no
+		 * swapcache. We need to ensure all the other PTEs have no
+		 * cache either; otherwise, we might read from swap devices
+		 * while the content is actually in the swapcache.
+		 */
+		if ((si->swap_map[start_offset + i] & SWAP_HAS_CACHE) != has_cache)
+			return false;
+	}
+
+	return true;
+}
+
 /*
  * We enter with non-exclusive mmap_lock (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -3995,6 +4048,9 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	pte_t pte;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
+	int nr_pages = 1;
+	unsigned long start_address;
+	pte_t *start_pte;
 
 	if (!pte_unmap_same(vmf))
 		goto out;
@@ -4058,28 +4114,32 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (!folio) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
-			/*
-			 * Prevent parallel swapin from proceeding with
-			 * the cache flag. Otherwise, another thread may
-			 * finish swapin first, free the entry, and swapout
-			 * reusing the same entry. It's undetectable as
-			 * pte_same() returns true due to entry reuse.
-			 */
-			if (swapcache_prepare(entry)) {
-				/* Relax a bit to prevent rapid repeated page faults */
-				schedule_timeout_uninterruptible(1);
-				goto out;
-			}
-			need_clear_cache = true;
-
 			/* skip swapcache */
-			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
-						vma, vmf->address, false);
+			folio = alloc_anon_folio(vmf, DO_SWAP_PAGE);
 			page = &folio->page;
 			if (folio) {
 				__folio_set_locked(folio);
 				__folio_set_swapbacked(folio);
 
+				if (folio_test_large(folio)) {
+					nr_pages = folio_nr_pages(folio);
+					entry.val = ALIGN_DOWN(entry.val, nr_pages);
+				}
+
+				/*
+				 * Prevent parallel swapin from proceeding with
+				 * the cache flag. Otherwise, another thread may
+				 * finish swapin first, free the entry, and swapout
+				 * reusing the same entry. It's undetectable as
+				 * pte_same() returns true due to entry reuse.
+				 */
+				if (swapcache_prepare_nr(entry, nr_pages)) {
+					/* Relax a bit to prevent rapid repeated page faults */
+					schedule_timeout_uninterruptible(1);
+					goto out;
+				}
+				need_clear_cache = true;
+
 				if (mem_cgroup_swapin_charge_folio(folio,
 							vma->vm_mm, GFP_KERNEL,
 							entry)) {
@@ -4185,6 +4245,42 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 */
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
+
+	start_address = vmf->address;
+	start_pte = vmf->pte;
+	if (folio_test_large(folio)) {
+		unsigned long nr = folio_nr_pages(folio);
+		unsigned long addr = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE);
+		pte_t *aligned_pte = vmf->pte - (vmf->address - addr) / PAGE_SIZE;
+
+		/*
+		 * case 1: we are allocating a large folio; try to map it as a
+		 * whole iff the swap entries are still entirely mapped;
+		 * case 2: we hit a large folio in the swapcache, and all swap
+		 * entries are still entirely mapped; try to map it as a whole.
+		 * Otherwise, map only the faulting page within the large
+		 * folio which is in the swapcache.
+		 */
+		if (!is_pte_range_contig_swap(aligned_pte, nr)) {
+			if (nr_pages > 1) /* ptes have changed for case 1 */
+				goto out_nomap;
+			goto check_pte;
+		}
+
+		start_address = addr;
+		start_pte = aligned_pte;
+		/*
+		 * For case 1, the below has already been done
+		 * before swap_read_folio().
+		 */
+		if (unlikely(folio == swapcache)) {
+			nr_pages = nr;
+			entry.val = ALIGN_DOWN(entry.val, nr_pages);
+			page = &folio->page;
+		}
+	}
+
+check_pte:
 	if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
 		goto out_nomap;
 
@@ -4252,12 +4348,14 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * We're already holding a reference on the page but haven't mapped it
 	 * yet.
 	 */
-	swap_free(entry);
+	swap_nr_free(entry, nr_pages);
 	if (should_try_to_free_swap(folio, vma, vmf->flags))
 		folio_free_swap(folio);
 
-	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+	folio_ref_add(folio, nr_pages - 1);
+	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
+
 	pte = mk_pte(page, vma->vm_page_prot);
 
 	/*
@@ -4267,14 +4365,14 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * exclusivity.
 	 */
 	if (!folio_test_ksm(folio) &&
-	    (exclusive || folio_ref_count(folio) == 1)) {
+	    (exclusive || folio_ref_count(folio) == nr_pages)) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
 			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 			vmf->flags &= ~FAULT_FLAG_WRITE;
 		}
 		rmap_flags |= RMAP_EXCLUSIVE;
 	}
-	flush_icache_page(vma, page);
+	flush_icache_pages(vma, page, nr_pages);
 	if (pte_swp_soft_dirty(vmf->orig_pte))
 		pte = pte_mksoft_dirty(pte);
 	if (pte_swp_uffd_wp(vmf->orig_pte))
@@ -4283,17 +4381,19 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 	/* ksm created a completely new copy */
 	if (unlikely(folio != swapcache && swapcache)) {
-		folio_add_new_anon_rmap(folio, vma, vmf->address);
+		folio_add_new_anon_rmap(folio, vma, start_address);
 		folio_add_lru_vma(folio, vma);
+	} else if (!folio_test_anon(folio)) {
+		folio_add_new_anon_rmap(folio, vma, start_address);
 	} else {
-		folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, start_address,
 					rmap_flags);
 	}
 
 	VM_BUG_ON(!folio_test_anon(folio) ||
 			(pte_write(pte) && !PageAnonExclusive(page)));
-	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
-	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
+	set_ptes(vma->vm_mm, start_address, start_pte, pte, nr_pages);
+	arch_do_swap_page(vma->vm_mm, vma, start_address, pte, vmf->orig_pte);
 
 	folio_unlock(folio);
 	if (folio != swapcache && swapcache) {
@@ -4310,6 +4410,9 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	}
 
 	if (vmf->flags & FAULT_FLAG_WRITE) {
+		if (nr_pages > 1)
+			vmf->orig_pte = ptep_get(vmf->pte);
+
 		ret |= do_wp_page(vmf);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
@@ -4317,14 +4420,14 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+	update_mmu_cache_range(vmf, vma, start_address, start_pte, nr_pages);
 unlock:
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	/* Clear the swap cache pin for direct swapin after PTL unlock */
 	if (need_clear_cache)
-		swapcache_clear(si, entry);
+		swapcache_clear_nr(si, entry, nr_pages);
 	if (si)
 		put_swap_device(si);
 	return ret;
@@ -4340,7 +4443,7 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 		folio_put(swapcache);
 	}
 	if (need_clear_cache)
-		swapcache_clear(si, entry);
+		swapcache_clear_nr(si, entry, nr_pages);
 	if (si)
 		put_swap_device(si);
 	return ret;
@@ -4358,7 +4461,7 @@  static bool pte_range_none(pte_t *pte, int nr_pages)
 	return true;
 }
 
-static struct folio *alloc_anon_folio(struct vm_fault *vmf)
+static struct folio *alloc_anon_folio(struct vm_fault *vmf, enum behavior behavior)
 {
 	struct vm_area_struct *vma = vmf->vma;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -4376,6 +4479,19 @@  static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	if (unlikely(userfaultfd_armed(vma)))
 		goto fallback;
 
+	/*
+	 * A large folio being swapped in could be partially in
+	 * zswap and partially in swap devices. As zswap doesn't
+	 * support large folios yet, we might get corrupted
+	 * zero-filled data by reading all subpages from swap
+	 * devices while some of them are actually in zswap.
+	 */
+	if (behavior == DO_SWAP_PAGE && is_zswap_enabled())
+		goto fallback;
+
+	if (unlikely(behavior != DO_ANON_PAGE && behavior != DO_SWAP_PAGE))
+		return ERR_PTR(-EINVAL);
+
 	/*
 	 * Get a list of all the (large) orders below PMD_ORDER that are enabled
 	 * for this vma. Then filter out the orders that can't be allocated over
@@ -4393,15 +4509,22 @@  static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 		return ERR_PTR(-EAGAIN);
 
 	/*
-	 * Find the highest order where the aligned range is completely
-	 * pte_none(). Note that all remaining orders will be completely
+	 * For do_anonymous_page, find the highest order where the aligned range is
+	 * completely pte_none(). Note that all remaining orders will be completely
 	 * pte_none().
+	 * For do_swap_page, find the highest order where the aligned range is
+	 * completely swap entries with contiguous swap offsets.
 	 */
 	order = highest_order(orders);
 	while (orders) {
 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-		if (pte_range_none(pte + pte_index(addr), 1 << order))
-			break;
+		if (behavior == DO_ANON_PAGE) {
+			if (pte_range_none(pte + pte_index(addr), 1 << order))
+				break;
+		} else  {
+			if (is_pte_range_contig_swap(pte + pte_index(addr), 1 << order))
+				break;
+		}
 		order = next_order(&orders, order);
 	}
 
@@ -4485,7 +4608,7 @@  static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	/* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
-	folio = alloc_anon_folio(vmf);
+	folio = alloc_anon_folio(vmf, DO_ANON_PAGE);
 	if (IS_ERR(folio))
 		return 0;
 	if (!folio)