[3/6] mm/mremap: Convert huge PUD move to separate helper

Message ID: 20210610083549.386085-4-aneesh.kumar@linux.ibm.com
State: New, archived
Series: mremap fixes

Commit Message

Aneesh Kumar K.V June 10, 2021, 8:35 a.m. UTC
With TRANSPARENT_HUGEPAGE_PUD enabled the kernel can find huge PUD entries.
Add a helper to move huge PUD entries on mremap().

This will be used by a later patch to optimize mremap of a PUD_SIZE-aligned,
level-4 PTE-mapped address range.

This also makes sure we support mremap on huge PUD entries even with
CONFIG_HAVE_MOVE_PUD disabled.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 mm/mremap.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 73 insertions(+), 7 deletions(-)
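
To make the intended use concrete, here is a minimal userspace sketch (not
part of the patch) of the kind of call that would exercise the new helper:
an mremap() of a PUD-aligned, PUD-sized (1GiB on x86-64) DAX mapping. The
device path, destination address, and size constant are illustrative
assumptions, not taken from the series.

/*
 * Illustrative only: assumes a DAX device at /dev/dax0.0 whose mapping
 * the kernel backs with a huge PUD entry (this depends on alignment and
 * on CONFIG support for PUD-sized pages).  With this patch applied, the
 * mremap() below can move the huge PUD entry itself.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define PUD_SIZE_1G	(1UL << 30)	/* PUD_SIZE on x86-64 */

int main(void)
{
	int fd = open("/dev/dax0.0", O_RDWR);	/* hypothetical device */
	if (fd < 0)
		return 1;

	void *old = mmap(NULL, PUD_SIZE_1G, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (old == MAP_FAILED)
		return 1;

	/* Move the whole PUD-sized range to a 1GiB-aligned destination. */
	void *new = mremap(old, PUD_SIZE_1G, PUD_SIZE_1G,
			   MREMAP_MAYMOVE | MREMAP_FIXED,
			   (void *)(1UL << 40));	/* arbitrary aligned hint */
	if (new == MAP_FAILED)
		return 1;

	munmap(new, PUD_SIZE_1G);
	close(fd);
	return 0;
}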

Comments

Hugh Dickins June 10, 2021, 10:03 p.m. UTC | #1
On Thu, 10 Jun 2021, Aneesh Kumar K.V wrote:

> With TRANSPARENT_HUGEPAGE_PUD enabled the kernel can find huge PUD entries.
> Add a helper to move huge PUD entries on mremap().
> 
> This will be used by a later patch to optimize mremap of a PUD_SIZE-aligned,
> level-4 PTE-mapped address range.
> 
> This also makes sure we support mremap on huge PUD entries even with
> CONFIG_HAVE_MOVE_PUD disabled.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>  mm/mremap.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 73 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 47c255b60150..92ab7d24a587 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -324,10 +324,62 @@ static inline bool move_normal_pud(struct vm_area_struct *vma,
>  }
>  #endif
>  
> +
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PUD

Should that say
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
?

(I'm a PUD-THP-sceptic, but if it's just for DAX then probably okay.)

> +static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
> +			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
> +{
> +	spinlock_t *old_ptl, *new_ptl;
> +	struct mm_struct *mm = vma->vm_mm;
> +	pud_t pud;
> +
> +	/*
> +	 * The destination pud shouldn't be established, free_pgtables()
> +	 * should have released it.
> +	 */
> +	if (WARN_ON_ONCE(!pud_none(*new_pud)))
> +		return false;
> +
> +	/*
> +	 * We don't have to worry about the ordering of src and dst
> +	 * ptlocks because exclusive mmap_lock prevents deadlock.
> +	 */
> +	old_ptl = pud_lock(vma->vm_mm, old_pud);
> +	new_ptl = pud_lockptr(mm, new_pud);
> +	if (new_ptl != old_ptl)
> +		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
> +
> +	/* Clear the pud */
> +	pud = *old_pud;
> +	pud_clear(old_pud);
> +
> +	VM_BUG_ON(!pud_none(*new_pud));
> +
> +	/* Set the new pud */
> +	/* mark soft_dirty when we add pud level soft dirty support */
> +	set_pud_at(mm, new_addr, new_pud, pud);
> +	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
> +	if (new_ptl != old_ptl)
> +		spin_unlock(new_ptl);
> +	spin_unlock(old_ptl);
> +
> +	return true;
> +}
> +#else
> +static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
> +			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
> +{
> +	WARN_ON_ONCE(1);
> +	return false;
> +
> +}
> +#endif
> +
>  enum pgt_entry {
>  	NORMAL_PMD,
>  	HPAGE_PMD,
>  	NORMAL_PUD,
> +	HPAGE_PUD,
>  };
>  
>  /*
> @@ -347,6 +399,7 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry,
>  		mask = PMD_MASK;
>  		size = PMD_SIZE;
>  		break;
> +	case HPAGE_PUD:
>  	case NORMAL_PUD:
>  		mask = PUD_MASK;
>  		size = PUD_SIZE;
> @@ -395,6 +448,11 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
>  			move_huge_pmd(vma, old_addr, new_addr, old_entry,
>  				      new_entry);
>  		break;
> +	case HPAGE_PUD:
> +		moved = move_huge_pud(vma, old_addr, new_addr, old_entry,
> +				      new_entry);
> +		break;
> +
>  	default:
>  		WARN_ON_ONCE(1);
>  		break;
> @@ -414,6 +472,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
>  	unsigned long extent, old_end;
>  	struct mmu_notifier_range range;
>  	pmd_t *old_pmd, *new_pmd;
> +	pud_t *old_pud, *new_pud;
>  
>  	old_end = old_addr + len;
>  	flush_cache_range(vma, old_addr, old_end);
> @@ -429,15 +488,22 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
>  		 * PUD level if possible.
>  		 */
>  		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
> -		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
> -			pud_t *old_pud, *new_pud;
>  
> -			old_pud = get_old_pud(vma->vm_mm, old_addr);
> -			if (!old_pud)
> +		old_pud = get_old_pud(vma->vm_mm, old_addr);
> +		if (!old_pud)
> +			continue;
> +		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
> +		if (!new_pud)
> +			break;
> +		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
> +			if (extent == HPAGE_PUD_SIZE) {
> +				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
> +					       old_pud, new_pud, need_rmap_locks);
> +				/* We ignore and continue on error? */
>  				continue;
> -			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
> -			if (!new_pud)
> -				break;
> +			}
> +		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
> +
>  			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
>  					   old_pud, new_pud, need_rmap_locks))
>  				continue;
> -- 
> 2.31.1
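
A note on Hugh's question above: CONFIG_TRANSPARENT_HUGEPAGE_PUD does not
appear to be a defined Kconfig symbol, so the #ifdef as posted would always
fall through to the WARN_ON_ONCE() stub. The symbol that gates huge-PUD
(DAX) support elsewhere in the tree is
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD, which architectures opt into
(arch/x86/Kconfig, for instance, has "select
HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64"). A sketch of the rename Hugh
is suggesting, shown for illustration rather than as an applied fix:

--- a/mm/mremap.c
+++ b/mm/mremap.c
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PUD
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
 			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
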
Patch

diff --git a/mm/mremap.c b/mm/mremap.c
index 47c255b60150..92ab7d24a587 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -324,10 +324,62 @@ static inline bool move_normal_pud(struct vm_area_struct *vma,
 }
 #endif
 
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PUD
+static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
+			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+{
+	spinlock_t *old_ptl, *new_ptl;
+	struct mm_struct *mm = vma->vm_mm;
+	pud_t pud;
+
+	/*
+	 * The destination pud shouldn't be established, free_pgtables()
+	 * should have released it.
+	 */
+	if (WARN_ON_ONCE(!pud_none(*new_pud)))
+		return false;
+
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * ptlocks because exclusive mmap_lock prevents deadlock.
+	 */
+	old_ptl = pud_lock(vma->vm_mm, old_pud);
+	new_ptl = pud_lockptr(mm, new_pud);
+	if (new_ptl != old_ptl)
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+	/* Clear the pud */
+	pud = *old_pud;
+	pud_clear(old_pud);
+
+	VM_BUG_ON(!pud_none(*new_pud));
+
+	/* Set the new pud */
+	/* mark soft_dirty when we add pud level soft dirty support */
+	set_pud_at(mm, new_addr, new_pud, pud);
+	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
+	spin_unlock(old_ptl);
+
+	return true;
+}
+#else
+static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
+			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+{
+	WARN_ON_ONCE(1);
+	return false;
+
+}
+#endif
+
 enum pgt_entry {
 	NORMAL_PMD,
 	HPAGE_PMD,
 	NORMAL_PUD,
+	HPAGE_PUD,
 };
 
 /*
@@ -347,6 +399,7 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry,
 		mask = PMD_MASK;
 		size = PMD_SIZE;
 		break;
+	case HPAGE_PUD:
 	case NORMAL_PUD:
 		mask = PUD_MASK;
 		size = PUD_SIZE;
@@ -395,6 +448,11 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
 			move_huge_pmd(vma, old_addr, new_addr, old_entry,
 				      new_entry);
 		break;
+	case HPAGE_PUD:
+		moved = move_huge_pud(vma, old_addr, new_addr, old_entry,
+				      new_entry);
+		break;
+
 	default:
 		WARN_ON_ONCE(1);
 		break;
@@ -414,6 +472,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	unsigned long extent, old_end;
 	struct mmu_notifier_range range;
 	pmd_t *old_pmd, *new_pmd;
+	pud_t *old_pud, *new_pud;
 
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
@@ -429,15 +488,22 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		 * PUD level if possible.
 		 */
 		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
-		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
-			pud_t *old_pud, *new_pud;
 
-			old_pud = get_old_pud(vma->vm_mm, old_addr);
-			if (!old_pud)
+		old_pud = get_old_pud(vma->vm_mm, old_addr);
+		if (!old_pud)
+			continue;
+		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
+		if (!new_pud)
+			break;
+		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
+			if (extent == HPAGE_PUD_SIZE) {
+				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
+					       old_pud, new_pud, need_rmap_locks);
+				/* We ignore and continue on error? */
 				continue;
-			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
-			if (!new_pud)
-				break;
+			}
+		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
+
 			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
 					   old_pud, new_pud, need_rmap_locks))
 				continue;