[v6,06/20] mm/vma: Change munmap to use vma_munmap_struct() for accounting and surrounding vmas

Message ID: 20240820235730.2852400-7-Liam.Howlett@oracle.com
State: New
Series: Avoid MAP_FIXED gap exposure

Commit Message

Liam R. Howlett Aug. 20, 2024, 11:57 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Clean up the code by changing the munmap operation to use a structure
for the accounting and munmap variables.

Since remove_mt() is only called in one location and the contents will
be reduced to almost nothing, the remains of the function can be added
to vms_complete_munmap_vmas().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/vma.c | 79 ++++++++++++++++++++++++++++----------------------------
 mm/vma.h |  6 +++++
 2 files changed, 46 insertions(+), 39 deletions(-)

Comments

Lorenzo Stoakes Aug. 21, 2024, 9:59 a.m. UTC | #1
On Tue, Aug 20, 2024 at 07:57:15PM GMT, Liam R. Howlett wrote:
> From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
>
> Clean up the code by changing the munmap operation to use a structure
> for the accounting and munmap variables.
>
> Since remove_mt() is only called in one location and the contents will
> be reduced to almost nothing, the remains of the function can be added
> to vms_complete_munmap_vmas().
>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  mm/vma.c | 79 ++++++++++++++++++++++++++++----------------------------
>  mm/vma.h |  6 +++++
>  2 files changed, 46 insertions(+), 39 deletions(-)
>
> diff --git a/mm/vma.c b/mm/vma.c
> index e1aee43a3dc4..7b8b8b983399 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c

[snip]

>  	mm = vms->mm;
> @@ -731,21 +708,26 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
>  	if (vms->unlock)
>  		mmap_write_downgrade(mm);
>
> -	prev = vma_iter_prev_range(vms->vmi);
> -	next = vma_next(vms->vmi);
> -	if (next)
> -		vma_iter_prev_range(vms->vmi);
> -
>  	/*
>  	 * We can free page tables without write-locking mmap_lock because VMAs
>  	 * were isolated before we downgraded mmap_lock.
>  	 */
>  	mas_set(mas_detach, 1);
> -	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
> -		     vms->vma_count, !vms->unlock);
> -	/* Statistics and freeing VMAs */
> +	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
> +		     vms->start, vms->end, vms->vma_count, !vms->unlock);
> +	/* Update high watermark before we lower total_vm */
> +	update_hiwater_vm(mm);
> +	/* Stat accounting */
> +	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
> +	mm->exec_vm -= vms->exec_vm;
> +	mm->stack_vm -= vms->stack_vm;
> +	mm->data_vm -= vms->data_vm;

See below, but I bisected a bug to this patch that manifested because of
miscalculated accounting. So I wonder whether it'd be a good idea to take
this opportunity, when updating the mm->... stats, to add some:

VM_WARN_ON(vms->exec_vm > mm->exec_vm);

etc. for each of the fields updated. This would help catch any accounting
issues like this with CONFIG_DEBUG_VM switched on.
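
For illustration, a minimal sketch of the full set of checks, covering
the fields this patch updates (untested; assuming VM_WARN_ON compiles
away without CONFIG_DEBUG_VM), might be:

	VM_WARN_ON(vms->nr_pages > mm->total_vm);
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);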

[snip]

> @@ -824,6 +807,22 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>  			goto munmap_gather_failed;
>
>  		vma_mark_detached(next, true);
> +		nrpages = vma_pages(next);
> +
> +		vms->nr_pages += nrpages;
> +		if (next->vm_flags & VM_LOCKED)
> +			vms->locked_vm += nrpages;

So I bisected a self-test failure, memfd_secret specifically, to this
commit. This is because you are double-counting VM_LOCKED...

> +
> +		if (next->vm_flags & VM_ACCOUNT)
> +			vms->nr_accounted += nrpages;
> +
> +		if (is_exec_mapping(next->vm_flags))
> +			vms->exec_vm += nrpages;
> +		else if (is_stack_mapping(next->vm_flags))
> +			vms->stack_vm += nrpages;
> +		else if (is_data_mapping(next->vm_flags))
> +			vms->data_vm += nrpages;
> +
>  		if (next->vm_flags & VM_LOCKED)
>  			vms->locked_vm += vma_pages(next);

...the double counting being right here :) so I think we should drop the
above couple lines.
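
With those two lines dropped, VM_LOCKED pages would be accounted exactly
once per vma, i.e. the gather loop would keep only the first check (a
sketch of the corrected hunk):

	if (next->vm_flags & VM_LOCKED)
		vms->locked_vm += nrpages;
	/* duplicate "vms->locked_vm += vma_pages(next);" removed */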

>
> @@ -847,7 +846,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
>  		BUG_ON(next->vm_start < vms->start);
>  		BUG_ON(next->vm_start > vms->end);
>  #endif
> -	} for_each_vma_range(*(vms->vmi), next, vms->end);
> +	}
> +
> +	vms->next = vma_next(vms->vmi);
>
>  #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
>  	/* Make sure no VMAs are about to be lost. */
> diff --git a/mm/vma.h b/mm/vma.h
> index cb67acf59012..cbf55e0e0c4f 100644
> --- a/mm/vma.h
> +++ b/mm/vma.h
> @@ -33,12 +33,18 @@ struct vma_munmap_struct {
>  	struct vma_iterator *vmi;
>  	struct mm_struct *mm;
>  	struct vm_area_struct *vma;     /* The first vma to munmap */
> +	struct vm_area_struct *prev;    /* vma before the munmap area */
> +	struct vm_area_struct *next;    /* vma after the munmap area */
>  	struct list_head *uf;           /* Userfaultfd list_head */
>  	unsigned long start;            /* Aligned start addr (inclusive) */
>  	unsigned long end;              /* Aligned end addr (exclusive) */
>  	int vma_count;                  /* Number of vmas that will be removed */
>  	unsigned long nr_pages;         /* Number of pages being removed */
>  	unsigned long locked_vm;        /* Number of locked pages */
> +	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
> +	unsigned long exec_vm;
> +	unsigned long stack_vm;
> +	unsigned long data_vm;
>  	bool unlock;                    /* Unlock after the munmap */
>  };
>
> --
> 2.43.0
>
Liam R. Howlett Aug. 21, 2024, 1:17 p.m. UTC | #2
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [240821 05:59]:
> On Tue, Aug 20, 2024 at 07:57:15PM GMT, Liam R. Howlett wrote:
> > From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> >
> > Clean up the code by changing the munmap operation to use a structure
> > for the accounting and munmap variables.
> >
> > Since remove_mt() is only called in one location and the contents will
> > be reduced to almost nothing, the remains of the function can be added
> > to vms_complete_munmap_vmas().
> >
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> > Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> > Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  mm/vma.c | 79 ++++++++++++++++++++++++++++----------------------------
> >  mm/vma.h |  6 +++++
> >  2 files changed, 46 insertions(+), 39 deletions(-)
> >
> > diff --git a/mm/vma.c b/mm/vma.c
> > index e1aee43a3dc4..7b8b8b983399 100644
> > --- a/mm/vma.c
> > +++ b/mm/vma.c
> 
> [snip]
> 
> >  	mm = vms->mm;
> > @@ -731,21 +708,26 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
> >  	if (vms->unlock)
> >  		mmap_write_downgrade(mm);
> >
> > -	prev = vma_iter_prev_range(vms->vmi);
> > -	next = vma_next(vms->vmi);
> > -	if (next)
> > -		vma_iter_prev_range(vms->vmi);
> > -
> >  	/*
> >  	 * We can free page tables without write-locking mmap_lock because VMAs
> >  	 * were isolated before we downgraded mmap_lock.
> >  	 */
> >  	mas_set(mas_detach, 1);
> > -	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
> > -		     vms->vma_count, !vms->unlock);
> > -	/* Statistics and freeing VMAs */
> > +	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
> > +		     vms->start, vms->end, vms->vma_count, !vms->unlock);
> > +	/* Update high watermark before we lower total_vm */
> > +	update_hiwater_vm(mm);
> > +	/* Stat accounting */
> > +	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
> > +	mm->exec_vm -= vms->exec_vm;
> > +	mm->stack_vm -= vms->stack_vm;
> > +	mm->data_vm -= vms->data_vm;
> 
> See below, but I bisected a bug to this patch that manifested because of
> miscalculated accounting. So I wonder whether it'd be a good idea to take
> this opportunity, when updating the mm->... stats, to add some:
> 
> VM_WARN_ON(vms->exec_vm > mm->exec_vm);
> 
> etc. for each of the fields updated. This would help catch any accounting
> issues like this with CONFIG_DEBUG_VM switched on.

Sounds good.

> 
> [snip]
> 
> > @@ -824,6 +807,22 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
> >  			goto munmap_gather_failed;
> >
> >  		vma_mark_detached(next, true);
> > +		nrpages = vma_pages(next);
> > +
> > +		vms->nr_pages += nrpages;
> > +		if (next->vm_flags & VM_LOCKED)
> > +			vms->locked_vm += nrpages;
> 
> So I bisected a self-test failure, memfd_secret specifically, to this
> commit. This is because you are double-counting VM_LOCKED...
> 
> > +
> > +		if (next->vm_flags & VM_ACCOUNT)
> > +			vms->nr_accounted += nrpages;
> > +
> > +		if (is_exec_mapping(next->vm_flags))
> > +			vms->exec_vm += nrpages;
> > +		else if (is_stack_mapping(next->vm_flags))
> > +			vms->stack_vm += nrpages;
> > +		else if (is_data_mapping(next->vm_flags))
> > +			vms->data_vm += nrpages;
> > +
> >  		if (next->vm_flags & VM_LOCKED)
> >  			vms->locked_vm += vma_pages(next);
> 
> ...the double counting being right here :) so I think we should drop the
> above couple lines.

Yeah, sure.

Thanks for reviewing!

> 
> >
> > @@ -847,7 +846,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
> >  		BUG_ON(next->vm_start < vms->start);
> >  		BUG_ON(next->vm_start > vms->end);
> >  #endif
> > -	} for_each_vma_range(*(vms->vmi), next, vms->end);
> > +	}
> > +
> > +	vms->next = vma_next(vms->vmi);
> >
> >  #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
> >  	/* Make sure no VMAs are about to be lost. */
> > diff --git a/mm/vma.h b/mm/vma.h
> > index cb67acf59012..cbf55e0e0c4f 100644
> > --- a/mm/vma.h
> > +++ b/mm/vma.h
> > @@ -33,12 +33,18 @@ struct vma_munmap_struct {
> >  	struct vma_iterator *vmi;
> >  	struct mm_struct *mm;
> >  	struct vm_area_struct *vma;     /* The first vma to munmap */
> > +	struct vm_area_struct *prev;    /* vma before the munmap area */
> > +	struct vm_area_struct *next;    /* vma after the munmap area */
> >  	struct list_head *uf;           /* Userfaultfd list_head */
> >  	unsigned long start;            /* Aligned start addr (inclusive) */
> >  	unsigned long end;              /* Aligned end addr (exclusive) */
> >  	int vma_count;                  /* Number of vmas that will be removed */
> >  	unsigned long nr_pages;         /* Number of pages being removed */
> >  	unsigned long locked_vm;        /* Number of locked pages */
> > +	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
> > +	unsigned long exec_vm;
> > +	unsigned long stack_vm;
> > +	unsigned long data_vm;
> >  	bool unlock;                    /* Unlock after the munmap */
> >  };
> >
> > --
> > 2.43.0
> >

Patch

diff --git a/mm/vma.c b/mm/vma.c
index e1aee43a3dc4..7b8b8b983399 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -103,7 +103,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->unlock = unlock;
 	vms->uf = uf;
 	vms->vma_count = 0;
-	vms->nr_pages = vms->locked_vm = 0;
+	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
+	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
 }
 
 /*
@@ -299,30 +300,6 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	return __split_vma(vmi, vma, addr, new_below);
 }
 
-/*
- * Ok - we have the memory areas we should free on a maple tree so release them,
- * and do the vma updates.
- *
- * Called with the mm semaphore held.
- */
-static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
-{
-	unsigned long nr_accounted = 0;
-	struct vm_area_struct *vma;
-
-	/* Update high watermark before we lower total_vm */
-	update_hiwater_vm(mm);
-	mas_for_each(mas, vma, ULONG_MAX) {
-		long nrpages = vma_pages(vma);
-
-		if (vma->vm_flags & VM_ACCOUNT)
-			nr_accounted += nrpages;
-		vm_stat_account(mm, vma->vm_flags, -nrpages);
-		remove_vma(vma, false);
-	}
-	vm_unacct_memory(nr_accounted);
-}
-
 /*
  * init_vma_prep() - Initializer wrapper for vma_prepare struct
  * @vp: The vma_prepare struct
@@ -722,7 +699,7 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 		struct ma_state *mas_detach)
 {
-	struct vm_area_struct *prev, *next;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 
 	mm = vms->mm;
@@ -731,21 +708,26 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	if (vms->unlock)
 		mmap_write_downgrade(mm);
 
-	prev = vma_iter_prev_range(vms->vmi);
-	next = vma_next(vms->vmi);
-	if (next)
-		vma_iter_prev_range(vms->vmi);
-
 	/*
 	 * We can free page tables without write-locking mmap_lock because VMAs
 	 * were isolated before we downgraded mmap_lock.
 	 */
 	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, prev, next, vms->start, vms->end,
-		     vms->vma_count, !vms->unlock);
-	/* Statistics and freeing VMAs */
+	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
+		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	/* Update high watermark before we lower total_vm */
+	update_hiwater_vm(mm);
+	/* Stat accounting */
+	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
+	mm->exec_vm -= vms->exec_vm;
+	mm->stack_vm -= vms->stack_vm;
+	mm->data_vm -= vms->data_vm;
+	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
-	remove_mt(mm, mas_detach);
+	mas_for_each(mas_detach, vma, ULONG_MAX)
+		remove_vma(vma, false);
+
+	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
 		mmap_read_unlock(mm);
@@ -799,18 +781,19 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		if (error)
 			goto start_split_failed;
 	}
+	vms->prev = vma_prev(vms->vmi);
 
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
 	 * it is always overwritten.
 	 */
-	next = vms->vma;
-	do {
+	for_each_vma_range(*(vms->vmi), next, vms->end) {
+		long nrpages;
+
 		if (!can_modify_vma(next)) {
 			error = -EPERM;
 			goto modify_vma_failed;
 		}
-
 		/* Does it split the end? */
 		if (next->vm_end > vms->end) {
 			error = __split_vma(vms->vmi, next, vms->end, 0);
@@ -824,6 +807,22 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 			goto munmap_gather_failed;
 
 		vma_mark_detached(next, true);
+		nrpages = vma_pages(next);
+
+		vms->nr_pages += nrpages;
+		if (next->vm_flags & VM_LOCKED)
+			vms->locked_vm += nrpages;
+
+		if (next->vm_flags & VM_ACCOUNT)
+			vms->nr_accounted += nrpages;
+
+		if (is_exec_mapping(next->vm_flags))
+			vms->exec_vm += nrpages;
+		else if (is_stack_mapping(next->vm_flags))
+			vms->stack_vm += nrpages;
+		else if (is_data_mapping(next->vm_flags))
+			vms->data_vm += nrpages;
+
 		if (next->vm_flags & VM_LOCKED)
 			vms->locked_vm += vma_pages(next);
 
@@ -847,7 +846,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 		BUG_ON(next->vm_start < vms->start);
 		BUG_ON(next->vm_start > vms->end);
 #endif
-	} for_each_vma_range(*(vms->vmi), next, vms->end);
+	}
+
+	vms->next = vma_next(vms->vmi);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
diff --git a/mm/vma.h b/mm/vma.h
index cb67acf59012..cbf55e0e0c4f 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -33,12 +33,18 @@ struct vma_munmap_struct {
 	struct vma_iterator *vmi;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;     /* The first vma to munmap */
+	struct vm_area_struct *prev;    /* vma before the munmap area */
+	struct vm_area_struct *next;    /* vma after the munmap area */
 	struct list_head *uf;           /* Userfaultfd list_head */
 	unsigned long start;            /* Aligned start addr (inclusive) */
 	unsigned long end;              /* Aligned end addr (exclusive) */
 	int vma_count;                  /* Number of vmas that will be removed */
 	unsigned long nr_pages;         /* Number of pages being removed */
 	unsigned long locked_vm;        /* Number of locked pages */
+	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
+	unsigned long exec_vm;
+	unsigned long stack_vm;
+	unsigned long data_vm;
 	bool unlock;                    /* Unlock after the munmap */
 };