[v4,40/66] exec: Use VMA iterator instead of linked list

Message ID 20211201142918.921493-41-Liam.Howlett@oracle.com
State New
Series Introducing the Maple Tree

Commit Message

Liam R. Howlett Dec. 1, 2021, 2:30 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Remove a use of the vm_next list by doing the initial lookup with the
VMA iterator and then using it to find the next entry.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 fs/exec.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
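
For readers new to the API, here is a minimal, illustrative sketch of the VMA
iterator pattern this patch adopts, assuming the VMA_ITERATOR()/vma_next()
semantics introduced earlier in this series. The helper show_vmas_from() is
hypothetical, not part of the patch:

#include <linux/mm.h>

/* Illustrative only: print every VMA at or above addr using the iterator. */
static void show_vmas_from(struct mm_struct *mm, unsigned long addr)
{
	VMA_ITERATOR(vmi, mm, addr);	/* iterator positioned at addr */
	struct vm_area_struct *vma;

	/*
	 * vma_next() returns the VMA at or after the current position and
	 * advances past it, so repeated calls walk the address space in
	 * order -- no vm_next linked-list pointer required.
	 */
	for (vma = vma_next(&vmi); vma; vma = vma_next(&vmi))
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
}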

Comments

Vlastimil Babka Jan. 19, 2022, 11:06 a.m. UTC | #1
On 12/1/21 15:30, Liam Howlett wrote:
> From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> 
> Remove a use of the vm_next list by doing the initial lookup with the
> VMA iterator and then using it to find the next entry.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>


Patch

diff --git a/fs/exec.c b/fs/exec.c
index fee18b63ed35..f033745c148a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -680,6 +680,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	unsigned long length = old_end - old_start;
 	unsigned long new_start = old_start - shift;
 	unsigned long new_end = old_end - shift;
+	VMA_ITERATOR(vmi, mm, new_start);
+	struct vm_area_struct *next;
 	struct mmu_gather tlb;
 
 	BUG_ON(new_start > new_end);
@@ -688,7 +690,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	 * ensure there are no vmas between where we want to go
 	 * and where we are
 	 */
-	if (vma != find_vma(mm, new_start))
+	if (vma != vma_next(&vmi))
 		return -EFAULT;
 
 	/*
@@ -707,12 +709,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm);
+	next = vma_next(&vmi);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
 		 */
 		free_pgd_range(&tlb, new_end, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+			next ? next->vm_start : USER_PGTABLES_CEILING);
 	} else {
 		/*
 		 * otherwise, clean from old_start; this is done to not touch
@@ -721,7 +724,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * for the others its just a little faster.
 		 */
 		free_pgd_range(&tlb, old_start, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+			next ? next->vm_start : USER_PGTABLES_CEILING);
 	}
 	tlb_finish_mmu(&tlb);
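
A note on the two vma_next() calls above (a sketch of the assumed iterator
semantics, mirroring the patch rather than adding to it): the first call
performs the positional lookup that replaces find_vma(), and because the
iterator advances past each VMA it returns, the second call yields the
successor VMA that the old code read through vma->vm_next.

	VMA_ITERATOR(vmi, mm, new_start);
	struct vm_area_struct *vma, *next;

	vma = vma_next(&vmi);	/* VMA at or after new_start; replaces find_vma() */
	next = vma_next(&vmi);	/* the VMA following vma; replaces vma->vm_next */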