[v3,06/12] khugepaged: introduce khugepaged_scan_bitmap for mTHP support

Message ID: 20250414220557.35388-7-npache@redhat.com (mailing list archive)
State: New
Series: khugepaged: mTHP support

Commit Message

Nico Pache April 14, 2025, 10:05 p.m. UTC
khugepaged scans PMD ranges for potential collapse to a hugepage. To add
mTHP support, we use this scan instead to record chunks of fully utilized
sections of the PMD.

Create a bitmap that represents a PMD in chunks of order MIN_MTHP_ORDER.
By default we set this to order 3: for 4K pages with a 512-entry PMD,
this results in a 64-bit bitmap, which enables some optimizations. For
other arches, such as ARM64 with 64K pages, a larger order can be set if
needed.
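
For illustration, here is a minimal userspace sketch of this size
arithmetic (the macro names mirror the patch, but the 4K-page constants
and the program itself are assumptions, not kernel code):

    #include <stdio.h>

    /* assumed values for a 4K-page kernel, not taken from kernel headers */
    #define MAX_PTRS_PER_PTE          512
    #define HPAGE_PMD_ORDER             9   /* 2M PMD = 512 x 4K PTEs */
    #define KHUGEPAGED_MIN_MTHP_ORDER   3

    #define KHUGEPAGED_MIN_MTHP_NR (1 << KHUGEPAGED_MIN_MTHP_ORDER)
    #define MTHP_BITMAP_SIZE (1 << (HPAGE_PMD_ORDER - KHUGEPAGED_MIN_MTHP_ORDER))

    int main(void)
    {
        /* 512 PTEs tracked as 8-page chunks -> a 64-bit bitmap */
        printf("chunk: %d pages, bitmap: %d bits\n",
               KHUGEPAGED_MIN_MTHP_NR, MTHP_BITMAP_SIZE);
        return 0;
    }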

khugepaged_scan_bitmap uses a stack of scan states to recursively scan
the bitmap of utilized chunks. From this we can determine which mTHP
size fits best; in the following patch, we set this bitmap while
scanning the PMD.
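
As a rough model of that traversal (a standalone sketch, not the kernel
code: it prints the visitation order when every collapse attempt fails,
so each region is split in half and both halves are rescanned):

    #include <stdio.h>

    #define MIN_MTHP_ORDER 3
    #define PMD_ORDER      9    /* assumed: 4K pages */

    struct scan_bit_state { unsigned char order; unsigned short offset; };

    int main(void)
    {
        struct scan_bit_state stack[16], s;
        int top = -1;

        stack[++top] = (struct scan_bit_state){ PMD_ORDER - MIN_MTHP_ORDER, 0 };
        while (top >= 0) {
            s = stack[top--];
            printf("try order %d at chunk offset %d\n",
                   s.order + MIN_MTHP_ORDER, s.offset);
            if (s.order > 0) {
                /* push the right half first so the left half is scanned next */
                int half = 1 << (s.order - 1);
                stack[++top] = (struct scan_bit_state){ s.order - 1, s.offset + half };
                stack[++top] = (struct scan_bit_state){ s.order - 1, s.offset };
            }
        }
        return 0;
    }

The descent is depth-first and left-to-right, so a lower order is only
tried for a region after the larger order covering it has been rejected.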

max_ptes_none is used as a scale, shifted down per order, to determine
how "full" a region must be before it is considered for collapse.
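
As a worked example of that scaling (a sketch assuming 4K pages, i.e.
HPAGE_PMD_NR = 512; the threshold expression matches the one used in
khugepaged_scan_bitmap below):

    #include <stdio.h>

    #define HPAGE_PMD_NR    512    /* assumed: 4K pages */
    #define HPAGE_PMD_ORDER   9
    #define MIN_MTHP_ORDER    3

    int main(void)
    {
        int max_ptes_none = 0;    /* strictest setting: no empty PTEs allowed */

        for (int order = MIN_MTHP_ORDER; order <= HPAGE_PMD_ORDER; order++) {
            int state_order = order - MIN_MTHP_ORDER;
            int num_chunks  = 1 << state_order;
            int threshold   = (HPAGE_PMD_NR - max_ptes_none - 1)
                              >> (HPAGE_PMD_ORDER - state_order);

            /* a region collapses when bits_set > threshold */
            printf("order %d: need more than %d of %d chunks set\n",
                   order, threshold, num_chunks);
        }
        return 0;
    }

With max_ptes_none = 0, every chunk of a region must be utilized before
it collapses; with the default max_ptes_none of HPAGE_PMD_NR - 1, the
threshold is 0 at every order, so a single utilized chunk suffices.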

If an order is set to "always", we always collapse to that order in a
greedy manner.

Signed-off-by: Nico Pache <npache@redhat.com>
---
 include/linux/khugepaged.h |  4 ++
 mm/khugepaged.c            | 94 ++++++++++++++++++++++++++++++++++----
 2 files changed, 89 insertions(+), 9 deletions(-)

Comments

Nico Pache April 14, 2025, 11:06 p.m. UTC | #1
On Mon, Apr 14, 2025 at 4:07 PM Nico Pache <npache@redhat.com> wrote:
>
> [snip commit message and diffstat]
>
> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> index 1f46046080f5..60d41215bc1a 100644
> --- a/include/linux/khugepaged.h
> +++ b/include/linux/khugepaged.h
> @@ -1,6 +1,10 @@
>  /* SPDX-License-Identifier: GPL-2.0 */
>  #ifndef _LINUX_KHUGEPAGED_H
>  #define _LINUX_KHUGEPAGED_H
> +#define KHUGEPAGED_MIN_MTHP_ORDER      3
Somehow I managed to drop
#define KHUGEPAGED_MIN_MTHP_ORDER 2
when cleaning up my patches.

Sending a V4 of just this patch in reply to this email.

Sorry for the noise...



Nico Pache April 14, 2025, 11:40 p.m. UTC | #2
On Mon, Apr 14, 2025 at 5:06 PM Nico Pache <npache@redhat.com> wrote:
>
> On Mon, Apr 14, 2025 at 4:07 PM Nico Pache <npache@redhat.com> wrote:
> >
> > [snip commit message and diff context]
> > +#define KHUGEPAGED_MIN_MTHP_ORDER      3
> Somehow I managed to drop
> #define KHUGEPAGED_MIN_MTHP_ORDER 2
> when cleaning up my patches.
>
> Sending a V4 of just this patch in reply to this email.
>
> Sorry for the noise...

Sorry, more noise...

The #define KHUGEPAGED_MIN_MTHP_ORDER 2 fixup got merged into the
wrong commit, and is actually in 07/12. If we take this V4, the merge
will clean up the 07/12 commit with no additional changes.

If sending out a V4 of 07/12 is needed, please let me know.

Patch

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 1f46046080f5..60d41215bc1a 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -1,6 +1,10 @@ 
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_KHUGEPAGED_H
 #define _LINUX_KHUGEPAGED_H
+#define KHUGEPAGED_MIN_MTHP_ORDER	3
+#define KHUGEPAGED_MIN_MTHP_NR	(1<<KHUGEPAGED_MIN_MTHP_ORDER)
+#define MAX_MTHP_BITMAP_SIZE  (1 << (ilog2(MAX_PTRS_PER_PTE) - KHUGEPAGED_MIN_MTHP_ORDER))
+#define MTHP_BITMAP_SIZE  (1 << (HPAGE_PMD_ORDER - KHUGEPAGED_MIN_MTHP_ORDER))
 
 extern unsigned int khugepaged_max_ptes_none __read_mostly;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index dfecedc6a515..5a3be30096fc 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -94,6 +94,11 @@  static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 
 static struct kmem_cache *mm_slot_cache __ro_after_init;
 
+struct scan_bit_state {
+	u8 order;
+	u16 offset;
+};
+
 struct collapse_control {
 	bool is_khugepaged;
 
@@ -102,6 +107,18 @@  struct collapse_control {
 
 	/* nodemask for allocation fallback */
 	nodemask_t alloc_nmask;
+
+	/*
+	 * Bitmap used to collapse mTHP sizes.
+	 * 1 bit = 1 chunk of order KHUGEPAGED_MIN_MTHP_ORDER
+	 */
+	DECLARE_BITMAP(mthp_bitmap, MAX_MTHP_BITMAP_SIZE);
+	DECLARE_BITMAP(mthp_bitmap_temp, MAX_MTHP_BITMAP_SIZE);
+	struct scan_bit_state mthp_bitmap_stack[MAX_MTHP_BITMAP_SIZE];
+};
+
+struct collapse_control khugepaged_collapse_control = {
+	.is_khugepaged = true,
 };
 
 /**
@@ -851,10 +868,6 @@  static void khugepaged_alloc_sleep(void)
 	remove_wait_queue(&khugepaged_wait, &wait);
 }
 
-struct collapse_control khugepaged_collapse_control = {
-	.is_khugepaged = true,
-};
-
 static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
 {
 	int i;
@@ -1118,7 +1131,8 @@  static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
 
 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 			      int referenced, int unmapped,
-			      struct collapse_control *cc)
+			      struct collapse_control *cc, bool *mmap_locked,
+			      u8 order, u16 offset)
 {
 	LIST_HEAD(compound_pagelist);
 	pmd_t *pmd, _pmd;
@@ -1137,8 +1151,12 @@  static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	 * The allocation can take potentially a long time if it involves
 	 * sync compaction, and we do not need to hold the mmap_lock during
 	 * that. We will recheck the vma after taking it again in write mode.
+	 * If collapsing mTHPs we may have already released the read_lock.
 	 */
-	mmap_read_unlock(mm);
+	if (*mmap_locked) {
+		mmap_read_unlock(mm);
+		*mmap_locked = false;
+	}
 
 	result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
 	if (result != SCAN_SUCCEED)
@@ -1273,12 +1291,72 @@  static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
+	*mmap_locked = false;
 	if (folio)
 		folio_put(folio);
 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
 	return result;
 }
 
+/* Iteratively consume the bitmap, depth-first, using an explicit stack */
+static int khugepaged_scan_bitmap(struct mm_struct *mm, unsigned long address,
+			int referenced, int unmapped, struct collapse_control *cc,
+			bool *mmap_locked, unsigned long enabled_orders)
+{
+	u8 order, next_order;
+	u16 offset, mid_offset;
+	int num_chunks;
+	int bits_set, threshold_bits;
+	int top = -1;
+	int collapsed = 0;
+	int ret;
+	struct scan_bit_state state;
+	bool is_pmd_only = (enabled_orders == (1 << HPAGE_PMD_ORDER));
+
+	cc->mthp_bitmap_stack[++top] = (struct scan_bit_state)
+		{ HPAGE_PMD_ORDER - KHUGEPAGED_MIN_MTHP_ORDER, 0 };
+
+	while (top >= 0) {
+		state = cc->mthp_bitmap_stack[top--];
+		order = state.order + KHUGEPAGED_MIN_MTHP_ORDER;
+		offset = state.offset;
+		num_chunks = 1 << (state.order);
+		/* Skip mTHP orders that are not enabled */
+		if (!test_bit(order, &enabled_orders))
+			goto next;
+
+		/* Copy the relevant section to a new bitmap */
+		bitmap_shift_right(cc->mthp_bitmap_temp, cc->mthp_bitmap, offset,
+				  MTHP_BITMAP_SIZE);
+
+		bits_set = bitmap_weight(cc->mthp_bitmap_temp, num_chunks);
+		threshold_bits = (HPAGE_PMD_NR - khugepaged_max_ptes_none - 1)
+				>> (HPAGE_PMD_ORDER - state.order);
+
+		/* Check if the region is "almost full" based on the threshold */
+		if (bits_set > threshold_bits || is_pmd_only
+			|| test_bit(order, &huge_anon_orders_always)) {
+			ret = collapse_huge_page(mm, address, referenced, unmapped, cc,
+					mmap_locked, order, offset * KHUGEPAGED_MIN_MTHP_NR);
+			if (ret == SCAN_SUCCEED) {
+				collapsed += (1 << order);
+				continue;
+			}
+		}
+
+next:
+		if (state.order > 0) {
+			next_order = state.order - 1;
+			mid_offset = offset + (num_chunks / 2);
+			cc->mthp_bitmap_stack[++top] = (struct scan_bit_state)
+				{ next_order, mid_offset };
+			cc->mthp_bitmap_stack[++top] = (struct scan_bit_state)
+				{ next_order, offset };
+		}
+	}
+	return collapsed;
+}
+
 static int khugepaged_scan_pmd(struct mm_struct *mm,
 				   struct vm_area_struct *vma,
 				   unsigned long address, bool *mmap_locked,
@@ -1445,9 +1523,7 @@  static int khugepaged_scan_pmd(struct mm_struct *mm,
 	pte_unmap_unlock(pte, ptl);
 	if (result == SCAN_SUCCEED) {
 		result = collapse_huge_page(mm, address, referenced,
-					    unmapped, cc);
-		/* collapse_huge_page will return with the mmap_lock released */
-		*mmap_locked = false;
+					    unmapped, cc, mmap_locked, HPAGE_PMD_ORDER, 0);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,