
[v3,06/16] mm/mmap: Change munmap to use vma_munmap_struct() for accounting and surrounding vmas

Message ID 20240704182718.2653918-7-Liam.Howlett@oracle.com
State New
Series Avoid MAP_FIXED gap exposure

Commit Message

Liam R. Howlett July 4, 2024, 6:27 p.m. UTC
Clean up the code by changing the munmap operation to use a structure
for the accounting and munmap variables.

Since remove_mt() is only called in one location and its contents
will be reduced to almost nothing, the remains of the function can be
added to vms_complete_munmap_vmas().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/internal.h |  6 ++++
 mm/mmap.c     | 81 ++++++++++++++++++++++++++-------------------------
 2 files changed, 47 insertions(+), 40 deletions(-)
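
In outline, the series threads vma_munmap_struct through the munmap path
in two phases.  The following is a simplified sketch of the
do_vmi_align_munmap() flow as of this series -- the detached maple tree
setup, the reattach-on-failure paths and the userfaultfd handling are
abbreviated, so exact signatures and error handling may differ:

static int do_vmi_align_munmap(struct vma_iterator *vmi,
		struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	struct vma_munmap_struct vms;
	int error;

	/* Record everything the operation needs in one place. */
	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);

	/*
	 * Phase 1: split the boundary vmas, move the affected vmas to
	 * mas_detach, and accumulate nr_pages/locked_vm/nr_accounted and
	 * the exec/stack/data counts while walking them.  With this
	 * patch, prev and next are captured here as well.
	 */
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		return error;

	/* Drop the range from the vma tree; the point of no return. */
	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		return error;	/* the real code reattaches the vmas */

	/*
	 * Phase 2: unmap the region, apply the accumulated statistics
	 * to the mm, free the detached vmas and unlock if requested.
	 */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;
}

The value of the split is that nothing between gather and complete has
touched the mm counters yet, which is what later patches in the series
exploit to avoid exposing a gap during MAP_FIXED operations.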

Comments

Lorenzo Stoakes July 5, 2024, 7:27 p.m. UTC | #1
On Thu, Jul 04, 2024 at 02:27:08PM GMT, Liam R. Howlett wrote:
> Clean up the code by changing the munmap operation to use a structure
> for the accounting and munmap variables.
>
> Since remove_mt() is only called in one location and its contents
> will be reduced to almost nothing, the remains of the function can be
> added to vms_complete_munmap_vmas().
>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> ---
>  mm/internal.h |  6 ++++
>  mm/mmap.c     | 81 ++++++++++++++++++++++++++-------------------------
>  2 files changed, 47 insertions(+), 40 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index f1e6dea2efcf..8cbbbe7d40f3 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1488,12 +1488,18 @@ struct vma_munmap_struct {
>  	struct vma_iterator *vmi;
>  	struct mm_struct *mm;
>  	struct vm_area_struct *vma;	/* The first vma to munmap */
> +	struct vm_area_struct *next;	/* vma after the munmap area */
> +	struct vm_area_struct *prev;    /* vma before the munmap area */

I mean this is about as pedantic as it gets, and, admittedly an annoying
comment to make, but the ordering... can't we at least put prev before
next? ;)

This is actually a comment you can fully disregard, by the way!

>  	struct list_head *uf;		/* Userfaultfd list_head */
>  	unsigned long start;		/* Aligned start addr */
>  	unsigned long end;		/* Aligned end addr */
>  	int vma_count;			/* Number of vmas that will be removed */
>  	unsigned long nr_pages;		/* Number of pages being removed */
>  	unsigned long locked_vm;	/* Number of locked pages */
> +	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
> +	unsigned long exec_vm;
> +	unsigned long stack_vm;
> +	unsigned long data_vm;
>  	bool unlock;			/* Unlock after the munmap */
>  };
>
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 76e93146ee9d..2a1a49f98fa3 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -523,7 +523,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
>  	vms->unlock = unlock;
>  	vms->uf = uf;
>  	vms->vma_count = 0;
> -	vms->nr_pages = vms->locked_vm = 0;
> +	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
> +	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
>  }
>
>  /*
> @@ -2388,30 +2389,6 @@ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
>  	return vma;
>  }
>
> -/*
> - * Ok - we have the memory areas we should free on a maple tree so release them,
> - * and do the vma updates.
> - *
> - * Called with the mm semaphore held.
> - */
> -static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
> -{
> -	unsigned long nr_accounted = 0;
> -	struct vm_area_struct *vma;
> -
> -	/* Update high watermark before we lower total_vm */
> -	update_hiwater_vm(mm);
> -	mas_for_each(mas, vma, ULONG_MAX) {
> -		long nrpages = vma_pages(vma);
> -
> -		if (vma->vm_flags & VM_ACCOUNT)
> -			nr_accounted += nrpages;
> -		vm_stat_account(mm, vma->vm_flags, -nrpages);
> -		remove_vma(vma, false);
> -	}
> -	vm_unacct_memory(nr_accounted);
> -}
> -
>  /*
>   * Get rid of page table information in the indicated region.
>   *
> @@ -2634,12 +2611,15 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
>   * vms_complete_munmap_vmas() - Finish the munmap() operation
>   * @vms: The vma munmap struct
>   * @mas_detach: The maple state of the detached vmas
> + *
> + * This function updates the mm_struct, unmaps the region, frees the resources
> + * used for the munmap() and may downgrade the lock - if requested.  It covers
> + * everything that needs to be done once the vma maple tree is updated.
>   */
> -
>  static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
>  		struct ma_state *mas_detach)
>  {
> -	struct vm_area_struct *prev, *next;
> +	struct vm_area_struct *vma;
>  	struct mm_struct *mm;
>
>  	mm = vms->mm;
> @@ -2648,21 +2628,26 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
>  	if (vms->unlock)
>  		mmap_write_downgrade(mm);
>
> -	prev = vma_iter_prev_range(vms->vmi);
> -	next = vma_next(vms->vmi);
> -	if (next)
> -		vma_iter_prev_range(vms->vmi);
> -
>  	/*
>  	 * We can free page tables without write-locking mmap_lock because VMAs
>  	 * were isolated before we downgraded mmap_lock.
>  	 */
>  	mas_set(mas_detach, 1);
> -	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
> -		     vms->vma_count, !vms->unlock);
> -	/* Statistics and freeing VMAs */
> +	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
> +		     vms->start, vms->end, vms->vma_count, !vms->unlock);
> +	/* Update high watermark before we lower total_vm */
> +	update_hiwater_vm(mm);
> +	/* Stat accounting */
> +	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
> +	mm->exec_vm -= vms->exec_vm;
> +	mm->stack_vm -= vms->stack_vm;
> +	mm->data_vm -= vms->data_vm;

OK, I was going to ask why you're READ_ONCE()'ing and WRITE_ONCE()'ing
mm->total_vm but not the other statistics, but the original
vm_stat_account() does the same, so. Yeah :)
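
For reference, this is roughly what vm_stat_account() looks like in
mm/mmap.c upstream (a sketch from memory, so treat the details as
approximate): only total_vm gets the READ_ONCE()/WRITE_ONCE() treatment,
presumably because it can be read without mmap_lock held, while the
per-class counters are only updated under the lock.

void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	/* total_vm may be read locklessly, hence WRITE_ONCE() */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) + npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

So the open-coded version in vms_complete_munmap_vmas() sensibly keeps
the WRITE_ONCE() for total_vm and plain updates for the rest.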

> +	/* Remove and clean up vmas */
>  	mas_set(mas_detach, 0);
> -	remove_mt(mm, mas_detach);
> +	mas_for_each(mas_detach, vma, ULONG_MAX)
> +		remove_vma(vma, false);
> +
> +	vm_unacct_memory(vms->nr_accounted);
>  	validate_mm(mm);
>  	if (vms->unlock)
>  		mmap_read_unlock(mm);
> @@ -2710,13 +2695,14 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>  		if (error)
>  			goto start_split_failed;
>  	}
> +	vms->prev = vma_prev(vms->vmi);
>
>  	/*
>  	 * Detach a range of VMAs from the mm. Using next as a temp variable as
>  	 * it is always overwritten.
>  	 */
> -	next = vms->vma;
> -	do {
> +	for_each_vma_range(*(vms->vmi), next, vms->end) {
> +		long nrpages;
>  		/* Does it split the end? */
>  		if (next->vm_end > vms->end) {
>  			error = __split_vma(vms->vmi, next, vms->end, 0);
> @@ -2725,8 +2711,21 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>  		}
>  		vma_start_write(next);
>  		mas_set(mas_detach, vms->vma_count++);
> +		nrpages = vma_pages(next);
> +
> +		vms->nr_pages += nrpages;
>  		if (next->vm_flags & VM_LOCKED)
> -			vms->locked_vm += vma_pages(next);
> +			vms->locked_vm += nrpages;
> +
> +		if (next->vm_flags & VM_ACCOUNT)
> +			vms->nr_accounted += nrpages;
> +
> +		if (is_exec_mapping(next->vm_flags))
> +			vms->exec_vm += nrpages;
> +		else if (is_stack_mapping(next->vm_flags))
> +			vms->stack_vm += nrpages;
> +		else if (is_data_mapping(next->vm_flags))
> +			vms->data_vm += nrpages;
>
>  		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
>  		if (error)
> @@ -2752,7 +2751,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>  		BUG_ON(next->vm_start < vms->start);
>  		BUG_ON(next->vm_start > vms->end);
>  #endif
> -	} for_each_vma_range(*(vms->vmi), next, vms->end);
> +	}
> +
> +	vms->next = vma_next(vms->vmi);
>
>  #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
>  	/* Make sure no VMAs are about to be lost. */
> --
> 2.43.0
>
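
As an aside, the is_exec_mapping()/is_stack_mapping()/is_data_mapping()
classifiers used in the gather-side accounting above are static inlines
from mm/internal.h.  Approximately (again a sketch -- the exact flag
tests have shifted across kernel versions, e.g. around shadow stack
support):

static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/* Data area - private, writable, not stack */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}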

This is a big improvement overall, very fiddly code.

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Liam R. Howlett July 5, 2024, 7:59 p.m. UTC | #2
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [240705 15:27]:
> On Thu, Jul 04, 2024 at 02:27:08PM GMT, Liam R. Howlett wrote:
> > Clean up the code by changing the munmap operation to use a structure
> > for the accounting and munmap variables.
> >
> > Since remove_mt() is only called in one location and its contents
> > will be reduced to almost nothing, the remains of the function can be
> > added to vms_complete_munmap_vmas().
> >
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> > ---
> >  mm/internal.h |  6 ++++
> >  mm/mmap.c     | 81 ++++++++++++++++++++++++++-------------------------
> >  2 files changed, 47 insertions(+), 40 deletions(-)
> >
> > diff --git a/mm/internal.h b/mm/internal.h
> > index f1e6dea2efcf..8cbbbe7d40f3 100644
> > --- a/mm/internal.h
> > +++ b/mm/internal.h
> > @@ -1488,12 +1488,18 @@ struct vma_munmap_struct {
> >  	struct vma_iterator *vmi;
> >  	struct mm_struct *mm;
> >  	struct vm_area_struct *vma;	/* The first vma to munmap */
> > +	struct vm_area_struct *next;	/* vma after the munmap area */
> > +	struct vm_area_struct *prev;    /* vma before the munmap area */
> 
> I mean this is about as pedantic as it gets, and, admittedly an annoying
> comment to make, but the ordering... can't we at least put prev before
> next? ;)

I can do that, no problem.
...

> 
> This is a big improvement overall, very fiddly code.
> 
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

Thanks!

Patch

diff --git a/mm/internal.h b/mm/internal.h
index f1e6dea2efcf..8cbbbe7d40f3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1488,12 +1488,18 @@ struct vma_munmap_struct {
 	struct vma_iterator *vmi;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;	/* The first vma to munmap */
+	struct vm_area_struct *next;	/* vma after the munmap area */
+	struct vm_area_struct *prev;    /* vma before the munmap area */
 	struct list_head *uf;		/* Userfaultfd list_head */
 	unsigned long start;		/* Aligned start addr */
 	unsigned long end;		/* Aligned end addr */
 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
+	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
+	unsigned long exec_vm;
+	unsigned long stack_vm;
+	unsigned long data_vm;
 	bool unlock;			/* Unlock after the munmap */
 };
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 76e93146ee9d..2a1a49f98fa3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -523,7 +523,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->unlock = unlock;
 	vms->uf = uf;
 	vms->vma_count = 0;
-	vms->nr_pages = vms->locked_vm = 0;
+	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 }
 
 /*
@@ -2388,30 +2389,6 @@ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 	return vma;
 }
 
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
-	unsigned long nr_accounted = 0;
-	struct vm_area_struct *vma;
-
-	/* Update high watermark before we lower total_vm */
-	update_hiwater_vm(mm);
-	mas_for_each(mas, vma, ULONG_MAX) {
-		long nrpages = vma_pages(vma);
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			nr_accounted += nrpages;
-		vm_stat_account(mm, vma->vm_flags, -nrpages);
-		remove_vma(vma, false);
-	}
-	vm_unacct_memory(nr_accounted);
-}
-
 /*
  * Get rid of page table information in the indicated region.
  *
@@ -2634,12 +2611,15 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
  * vms_complete_munmap_vmas() - Finish the munmap() operation
  * @vms: The vma munmap struct
  * @mas_detach: The maple state of the detached vmas
+ *
+ * This function updates the mm_struct, unmaps the region, frees the resources
+ * used for the munmap() and may downgrade the lock - if requested.  It covers
+ * everything that needs to be done once the vma maple tree is updated.
  */
-
 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach)
 {
-	struct vm_area_struct *prev, *next;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 
 	mm = vms->mm;
@@ -2648,21 +2628,26 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	if (vms->unlock)
 		mmap_write_downgrade(mm);
 
-	prev = vma_iter_prev_range(vms->vmi);
-	next = vma_next(vms->vmi);
-	if (next)
-		vma_iter_prev_range(vms->vmi);
-
 	/*
 	 * We can free page tables without write-locking mmap_lock because VMAs
 	 * were isolated before we downgraded mmap_lock.
 	 */
 	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
-		     vms->vma_count, !vms->unlock);
-	/* Statistics and freeing VMAs */
+	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
+		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
+	/* Stat accounting */
+	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+	mm->exec_vm -= vms->exec_vm;
+	mm->stack_vm -= vms->stack_vm;
+	mm->data_vm -= vms->data_vm;
+	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
-	remove_mt(mm, mas_detach);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		remove_vma(vma, false);
+
+	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
 		mmap_read_unlock(mm);
@@ -2710,13 +2695,14 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		if (error)
 			goto start_split_failed;
 	}
+	vms->prev = vma_prev(vms->vmi);
 
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
 	 * it is always overwritten.
 	 */
-	next = vms->vma;
-	do {
+	for_each_vma_range(*(vms->vmi), next, vms->end) {
+		long nrpages;
 		/* Does it split the end? */
 		if (next->vm_end > vms->end) {
 			error = __split_vma(vms->vmi, next, vms->end, 0);
@@ -2725,8 +2711,21 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		}
 		vma_start_write(next);
 		mas_set(mas_detach, vms->vma_count++);
+		nrpages = vma_pages(next);
+
+		vms->nr_pages += nrpages;
 		if (next->vm_flags & VM_LOCKED)
-			vms->locked_vm += vma_pages(next);
+			vms->locked_vm += nrpages;
+
+		if (next->vm_flags & VM_ACCOUNT)
+			vms->nr_accounted += nrpages;
+
+		if (is_exec_mapping(next->vm_flags))
+			vms->exec_vm += nrpages;
+		else if (is_stack_mapping(next->vm_flags))
+			vms->stack_vm += nrpages;
+		else if (is_data_mapping(next->vm_flags))
+			vms->data_vm += nrpages;
 
 		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
 		if (error)
@@ -2752,7 +2751,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		BUG_ON(next->vm_start < vms->start);
 		BUG_ON(next->vm_start > vms->end);
 #endif
-	} for_each_vma_range(*(vms->vmi), next, vms->end);
+	}
+
+	vms->next = vma_next(vms->vmi);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */