[v8,3/3] mm/vmscan: avoid split lazyfree THP during shrink_folio_list()

Message ID 20240614015138.31461-4-ioworker0@gmail.com (mailing list archive)
State New
Series Reclaim lazyfree THP without splitting

Commit Message

Lance Yang June 14, 2024, 1:51 a.m. UTC
When the user no longer requires the pages, they mark them as lazy-free
with madvise(MADV_FREE) and typically do not write to that memory again.
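
For reference, the userspace side of this pattern looks roughly like the
sketch below. This is illustrative only and not part of this patch; error
handling is trimmed, and the MADV_HUGEPAGE call is merely one way to get
THP-backed memory in a test setup:

#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB, i.e. many PMD-sized THPs */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	madvise(buf, len, MADV_HUGEPAGE);	/* request THPs for the range */
	memset(buf, 1, len);			/* fault the range in */

	/*
	 * Mark the range lazy-free: reclaim may discard these clean
	 * anonymous pages instead of swapping them out. A later write
	 * would cancel the lazy-free state.
	 */
	madvise(buf, len, MADV_FREE);
	return 0;
}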

During memory reclaim, if we detect that a large folio and its PMD are
both still clean and there are no unexpected references (such as GUP
pins), we can simply discard the memory lazily, improving the efficiency
of memory reclamation in this case.
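
As a concrete example: a lazyfree THP mapped into a single process has a
folio mapcount of 1, so the only expected folio references are that rmap
plus the one taken at isolation, i.e. a refcount of 2; anything beyond
that (e.g. a concurrent GUP pin) means the folio cannot be discarded
safely and is remapped instead.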

On an Intel i5 CPU, reclaiming 1GiB of lazyfree THPs using
mem_cgroup_force_empty() results in the following runtimes in seconds
(shorter is better):

--------------------------------------------
|     Old       |      New       |  Change  |
--------------------------------------------
|   0.683426    |    0.049197    |  -92.80% |
--------------------------------------------
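
For anyone reproducing the numbers: mem_cgroup_force_empty() is reached by
writing to the cgroup-v1 memory.force_empty file. Below is a minimal
sketch of the trigger side, assuming a v1 memory controller mounted at
/sys/fs/cgroup/memory and a test cgroup named "lazyfree" that the workload
has been placed into (both are assumptions about the test setup):

#include <fcntl.h>
#include <unistd.h>

static int force_empty(void)
{
	/* Writing any value here triggers full reclaim of the cgroup. */
	int fd = open("/sys/fs/cgroup/memory/lazyfree/memory.force_empty",
		      O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "0", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}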

Suggested-by: Zi Yan <ziy@nvidia.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Lance Yang <ioworker0@gmail.com>
---
 include/linux/huge_mm.h |  9 +++++
 mm/huge_memory.c        | 76 +++++++++++++++++++++++++++++++++++++++++
 mm/rmap.c               | 27 +++++++++------
 3 files changed, 102 insertions(+), 10 deletions(-)

Comments

David Hildenbrand June 17, 2024, 6:04 p.m. UTC | #1
Sorry for taking so long to review ... getting there. Mostly nits.

> @@ -497,6 +499,13 @@ static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
>   					 unsigned long address, pmd_t *pmd,
>   					 bool freeze, struct folio *folio) {}
>   
> +static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
> +					 unsigned long addr, pmd_t *pmdp,
> +					 struct folio *folio)
> +{
> +	return false;
> +}
> +
>   #define split_huge_pud(__vma, __pmd, __address)	\
>   	do { } while (0)
>   
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index e766d3f3a302..425374ae06ed 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2688,6 +2688,82 @@ static void unmap_folio(struct folio *folio)
>   	try_to_unmap_flush();
>   }
>   
> +static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
> +					    unsigned long addr, pmd_t *pmdp,
> +					    struct folio *folio)
> +{
> +	VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
> +	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

I would drop these (that's exactly what the single caller checks). In 
any case don't place them above the variable declaration ;)

> +
> +	struct mm_struct *mm = vma->vm_mm;
> +	int ref_count, map_count;
> +	pmd_t orig_pmd = *pmdp;
> +	struct page *page;
> +
> +	if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
> +		return false;
> +
> +	page = pmd_page(orig_pmd);
> +	if (unlikely(page_folio(page) != folio))
> +		return false;

I'm curious, how could that happen? And how could it happen that we have 
!pmd_trans_huge()? Didn't the rmap walking code make sure that this PMD 
maps the folio already, while we are holding the PTL?

> +
> +	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
> +		return false;
> +
> +	orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
> +
> +	/*
> +	 * Syncing against concurrent GUP-fast:
> +	 * - clear PMD; barrier; read refcount
> +	 * - inc refcount; barrier; read PMD
> +	 */
> +	smp_mb();
> +
> +	ref_count = folio_ref_count(folio);
> +	map_count = folio_mapcount(folio);
> +
> +	/*
> +	 * Order reads for folio refcount and dirty flag
> +	 * (see comments in __remove_mapping()).
> +	 */
> +	smp_rmb();
> +
> +	/*
> +	 * If the folio or its PMD is redirtied at this point, or if there
> +	 * are unexpected references, we give up discarding this folio
> +	 * and remap it.
> +	 *
> +	 * The only folio refs must be one from isolation plus the rmap(s).
> +	 */
> +	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
> +	    ref_count != map_count + 1) {
> +		set_pmd_at(mm, addr, pmdp, orig_pmd);
> +		return false;
> +	}
> +
> +	folio_remove_rmap_pmd(folio, page, vma);
> +	zap_deposited_table(mm, pmdp);
> +	add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> +	if (vma->vm_flags & VM_LOCKED)
> +		mlock_drain_local();
> +	folio_put(folio);
> +
> +	return true;
> +}
> +
> +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
> +			   pmd_t *pmdp, struct folio *folio)
> +{
> +	VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
> +	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
> +	VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
> +
> +	if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
> +		return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
> +
> +	return false;
> +}
> +
>   static void remap_page(struct folio *folio, unsigned long nr)
>   {
>   	int i = 0;
> diff --git a/mm/rmap.c b/mm/rmap.c
> index dacf24bc82f0..7d97806f74cd 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1678,16 +1678,23 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>   			goto walk_abort;
>   		}
>   
> -		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
> -			/*
> -			 * We temporarily have to drop the PTL and start once
> -			 * again from that now-PTE-mapped page table.
> -			 */
> -			split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
> -					      false, folio);
> -			flags &= ~TTU_SPLIT_HUGE_PMD;
> -			page_vma_mapped_walk_restart(&pvmw);
> -			continue;
> +		if (!pvmw.pte) {
> +			if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
> +						  folio))
> +				goto walk_done;
> +
> +			if (flags & TTU_SPLIT_HUGE_PMD) {
> +				/*
> +				 * We temporarily have to drop the PTL and start
> +				 * once again from that now-PTE-mapped page
> +				 * table.

Nit: it's not a PTE-mapped page table.

Maybe

"... restart so we can process the PTE-mapped THP."



>   		}
>   
>   		/* Unexpected PMD-mapped THP? */

Nothing else jumped at me :)
Lance Yang June 18, 2024, 1:56 a.m. UTC | #2
On Tue, Jun 18, 2024 at 2:04 AM David Hildenbrand <david@redhat.com> wrote:
>
> Sorry for taking so long to review ... getting there. Mostly nits.

No worries at all :)

Thanks for taking the time to review!

>
> > @@ -497,6 +499,13 @@ static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
> >                                        unsigned long address, pmd_t *pmd,
> >                                        bool freeze, struct folio *folio) {}
> >
> > +static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
> > +                                      unsigned long addr, pmd_t *pmdp,
> > +                                      struct folio *folio)
> > +{
> > +     return false;
> > +}
> > +
> >   #define split_huge_pud(__vma, __pmd, __address)     \
> >       do { } while (0)
> >
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index e766d3f3a302..425374ae06ed 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -2688,6 +2688,82 @@ static void unmap_folio(struct folio *folio)
> >       try_to_unmap_flush();
> >   }
> >
> > +static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
> > +                                         unsigned long addr, pmd_t *pmdp,
> > +                                         struct folio *folio)
> > +{
> > +     VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
> > +     VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
>
> I would drop these (that's exactly what the single caller checks). In

Agreed. I will drop these.

> any case don't place them above the variable declaration ;)

Yep, I see.

>
> > +
> > +     struct mm_struct *mm = vma->vm_mm;
> > +     int ref_count, map_count;
> > +     pmd_t orig_pmd = *pmdp;
> > +     struct page *page;
> > +
> > +     if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
> > +             return false;
> > +
> > +     page = pmd_page(orig_pmd);
> > +     if (unlikely(page_folio(page) != folio))
> > +             return false;
>
> I'm curious, how could that happen? And how could it happen that we have
> !pmd_trans_huge()? Didn't the rmap walking code make sure that this PMD
> maps the folio already, while we are holding the PTL?

Makes sense to me. I was adding these just in case, but it's probably too much.

Let's drop them ;)

>
> > +
> > +     if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
> > +             return false;
> > +
> > +     orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
> > +
> > +     /*
> > +      * Syncing against concurrent GUP-fast:
> > +      * - clear PMD; barrier; read refcount
> > +      * - inc refcount; barrier; read PMD
> > +      */
> > +     smp_mb();
> > +
> > +     ref_count = folio_ref_count(folio);
> > +     map_count = folio_mapcount(folio);
> > +
> > +     /*
> > +      * Order reads for folio refcount and dirty flag
> > +      * (see comments in __remove_mapping()).
> > +      */
> > +     smp_rmb();
> > +
> > +     /*
> > +      * If the folio or its PMD is redirtied at this point, or if there
> > +      * are unexpected references, we give up discarding this folio
> > +      * and remap it.
> > +      *
> > +      * The only folio refs must be one from isolation plus the rmap(s).
> > +      */
> > +     if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
> > +         ref_count != map_count + 1) {
> > +             set_pmd_at(mm, addr, pmdp, orig_pmd);
> > +             return false;
> > +     }
> > +
> > +     folio_remove_rmap_pmd(folio, page, vma);
> > +     zap_deposited_table(mm, pmdp);
> > +     add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> > +     if (vma->vm_flags & VM_LOCKED)
> > +             mlock_drain_local();
> > +     folio_put(folio);
> > +
> > +     return true;
> > +}
> > +
> > +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
> > +                        pmd_t *pmdp, struct folio *folio)
> > +{
> > +     VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
> > +     VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
> > +     VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
> > +
> > +     if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
> > +             return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
> > +
> > +     return false;
> > +}
> > +
> >   static void remap_page(struct folio *folio, unsigned long nr)
> >   {
> >       int i = 0;
> > diff --git a/mm/rmap.c b/mm/rmap.c
> > index dacf24bc82f0..7d97806f74cd 100644
> > --- a/mm/rmap.c
> > +++ b/mm/rmap.c
> > @@ -1678,16 +1678,23 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
> >                       goto walk_abort;
> >               }
> >
> > -             if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
> > -                     /*
> > -                      * We temporarily have to drop the PTL and start once
> > -                      * again from that now-PTE-mapped page table.
> > -                      */
> > -                     split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
> > -                                           false, folio);
> > -                     flags &= ~TTU_SPLIT_HUGE_PMD;
> > -                     page_vma_mapped_walk_restart(&pvmw);
> > -                     continue;
> > +             if (!pvmw.pte) {
> > +                     if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
> > +                                               folio))
> > +                             goto walk_done;
> > +
> > +                     if (flags & TTU_SPLIT_HUGE_PMD) {
> > +                             /*
> > +                              * We temporarily have to drop the PTL and start
> > +                              * once again from that now-PTE-mapped page
> > +                              * table.
>
> Nit: it's not a PTE-mapped page table.
>
> Maybe
>
> "... restart so we can process the PTE-mapped THP."

Nice. Will adjust as you suggested.

>
>
>
> >               }
> >
> >               /* Unexpected PMD-mapped THP? */
>
> Nothing else jumped at me :)

Thanks again for your time!
Lance

>
> --
> Cheers,
>
> David / dhildenb
>
Lance Yang June 22, 2024, 10 a.m. UTC | #3
Hi Andrew,

I made some minor changes suggested by David[1]. Could you please fold the
following changes into this patch?

[1] https://lore.kernel.org/linux-mm/e7c0aff1-b690-4926-9a34-4e32c9f3faaa@redhat.com/

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b2817bb2c7d..0cb52ae29259 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2693,21 +2693,11 @@ static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
 					    unsigned long addr, pmd_t *pmdp,
 					    struct folio *folio)
 {
-	VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
-	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
-
 	struct mm_struct *mm = vma->vm_mm;
 	int ref_count, map_count;
 	pmd_t orig_pmd = *pmdp;
 	struct page *page;
 
-	if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
-		return false;
-
-	page = pmd_page(orig_pmd);
-	if (unlikely(page_folio(page) != folio))
-		return false;
-
 	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
 		return false;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index df1a43295c85..b358501fb7e8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1678,9 +1678,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 
 			if (flags & TTU_SPLIT_HUGE_PMD) {
 				/*
-				 * We temporarily have to drop the PTL and start
-				 * once again from that now-PTE-mapped page
-				 * table.
+				 * We temporarily have to drop the PTL and
+				 * restart so we can process the PTE-mapped THP.
 				 */
 				split_huge_pmd_locked(vma, pvmw.address,
 						      pvmw.pmd, false, folio);

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9f720b0731c4..212cca384d7e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -430,6 +430,8 @@  static inline bool thp_migration_supported(void)
 
 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze, struct folio *folio);
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+			   pmd_t *pmdp, struct folio *folio);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -497,6 +499,13 @@  static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
 					 unsigned long address, pmd_t *pmd,
 					 bool freeze, struct folio *folio) {}
 
+static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
+					 unsigned long addr, pmd_t *pmdp,
+					 struct folio *folio)
+{
+	return false;
+}
+
 #define split_huge_pud(__vma, __pmd, __address)	\
 	do { } while (0)
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e766d3f3a302..425374ae06ed 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2688,6 +2688,82 @@  static void unmap_folio(struct folio *folio)
 	try_to_unmap_flush();
 }
 
+static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
+					    unsigned long addr, pmd_t *pmdp,
+					    struct folio *folio)
+{
+	VM_WARN_ON_FOLIO(folio_test_swapbacked(folio), folio);
+	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+
+	struct mm_struct *mm = vma->vm_mm;
+	int ref_count, map_count;
+	pmd_t orig_pmd = *pmdp;
+	struct page *page;
+
+	if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
+		return false;
+
+	page = pmd_page(orig_pmd);
+	if (unlikely(page_folio(page) != folio))
+		return false;
+
+	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
+		return false;
+
+	orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
+
+	/*
+	 * Syncing against concurrent GUP-fast:
+	 * - clear PMD; barrier; read refcount
+	 * - inc refcount; barrier; read PMD
+	 */
+	smp_mb();
+
+	ref_count = folio_ref_count(folio);
+	map_count = folio_mapcount(folio);
+
+	/*
+	 * Order reads for folio refcount and dirty flag
+	 * (see comments in __remove_mapping()).
+	 */
+	smp_rmb();
+
+	/*
+	 * If the folio or its PMD is redirtied at this point, or if there
+	 * are unexpected references, we give up discarding this folio
+	 * and remap it.
+	 *
+	 * The only folio refs must be one from isolation plus the rmap(s).
+	 */
+	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
+	    ref_count != map_count + 1) {
+		set_pmd_at(mm, addr, pmdp, orig_pmd);
+		return false;
+	}
+
+	folio_remove_rmap_pmd(folio, page, vma);
+	zap_deposited_table(mm, pmdp);
+	add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+	if (vma->vm_flags & VM_LOCKED)
+		mlock_drain_local();
+	folio_put(folio);
+
+	return true;
+}
+
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+			   pmd_t *pmdp, struct folio *folio)
+{
+	VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
+	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
+
+	if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
+		return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
+
+	return false;
+}
+
 static void remap_page(struct folio *folio, unsigned long nr)
 {
 	int i = 0;
diff --git a/mm/rmap.c b/mm/rmap.c
index dacf24bc82f0..7d97806f74cd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1678,16 +1678,23 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			goto walk_abort;
 		}
 
-		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
-			/*
-			 * We temporarily have to drop the PTL and start once
-			 * again from that now-PTE-mapped page table.
-			 */
-			split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
-					      false, folio);
-			flags &= ~TTU_SPLIT_HUGE_PMD;
-			page_vma_mapped_walk_restart(&pvmw);
-			continue;
+		if (!pvmw.pte) {
+			if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
+						  folio))
+				goto walk_done;
+
+			if (flags & TTU_SPLIT_HUGE_PMD) {
+				/*
+				 * We temporarily have to drop the PTL and start
+				 * once again from that now-PTE-mapped page
+				 * table.
+				 */
+				split_huge_pmd_locked(vma, pvmw.address,
+						      pvmw.pmd, false, folio);
+				flags &= ~TTU_SPLIT_HUGE_PMD;
+				page_vma_mapped_walk_restart(&pvmw);
+				continue;
+			}
 		}
 
 		/* Unexpected PMD-mapped THP? */