[v4,51/66] mm/khugepaged: Use maple tree iterators instead of vma linked list

Message ID: 20211201142918.921493-52-Liam.Howlett@oracle.com
State: New
Series: Introducing the Maple Tree

Commit Message

Liam R. Howlett Dec. 1, 2021, 2:30 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/huge_memory.c | 4 ++--
 mm/khugepaged.c  | 9 ++++++---
 2 files changed, 8 insertions(+), 5 deletions(-)

Comments

Vlastimil Babka Jan. 19, 2022, 5:48 p.m. UTC | #1
On 12/1/21 15:30, Liam Howlett wrote:
> From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Ah, got tired of copy/pasting for a number of patches, I see :)

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> ---
>  mm/huge_memory.c | 4 ++--
>  mm/khugepaged.c  | 9 ++++++---
>  2 files changed, 8 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index e5483347291c..f0f4ff5239ef 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2270,11 +2270,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
>  	split_huge_pmd_if_needed(vma, end);
>  
>  	/*
> -	 * If we're also updating the vma->vm_next->vm_start,
> +	 * If we're also updating the vma_next(vma)->vm_start,

vma_next() takes an iterator, not vma, though.
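
For reference, a minimal sketch of the iterator-based form being referred to
here, assuming the vma_iterator/mas_find() API as it appears in the merged
maple tree work (the exact v4 helper may differ):

	/*
	 * Sketch only: vma_next() advances a VMA iterator, so it cannot
	 * take a struct vm_area_struct as the patch comment implies.
	 */
	static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
	{
		/* mas_find() returns the next VMA at or after the iterator index. */
		return mas_find(&vmi->mas, ULONG_MAX);
	}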

>  	 * check if we need to split it.
>  	 */
>  	if (adjust_next > 0) {
> -		struct vm_area_struct *next = vma->vm_next;
> +		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
>  		unsigned long nstart = next->vm_start;
>  		nstart += adjust_next;
>  		split_huge_pmd_if_needed(next, nstart);
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 0ff7d72cdd1d..8f0633481791 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -2083,6 +2083,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
>  	struct mm_struct *mm;
>  	struct vm_area_struct *vma;
>  	int progress = 0;
> +	unsigned long address;
>  
>  	VM_BUG_ON(!pages);
>  	lockdep_assert_held(&khugepaged_mm_lock);
> @@ -2106,11 +2107,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
>  	vma = NULL;
>  	if (unlikely(!mmap_read_trylock(mm)))
>  		goto breakouterloop_mmap_lock;
> -	if (likely(!khugepaged_test_exit(mm)))
> -		vma = find_vma(mm, khugepaged_scan.address);
>  
>  	progress++;
> -	for (; vma; vma = vma->vm_next) {
> +	if (unlikely(khugepaged_test_exit(mm)))
> +		goto breakouterloop;
> +
> +	address = khugepaged_scan.address;
> +	mt_for_each(&mm->mm_mt, vma, address, ULONG_MAX) {

Why not via mas_for_each()?
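
A minimal sketch of that alternative, assuming the MA_STATE() and
mas_for_each() helpers from include/linux/maple_tree.h, with the scan body
unchanged:

	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, khugepaged_scan.address,
		 khugepaged_scan.address);

	/*
	 * Walk every VMA from the saved scan address to the end of the
	 * address space; mas_for_each() wraps mas_find() internally.
	 */
	mas_for_each(&mas, vma, ULONG_MAX) {
		/* ... same per-VMA khugepaged scan body ... */
	}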

>  		unsigned long hstart, hend;
>  
>  		cond_resched();

Liam R. Howlett Jan. 25, 2022, 10:03 p.m. UTC | #2
* Vlastimil Babka <vbabka@suse.cz> [220119 12:48]:
> On 12/1/21 15:30, Liam Howlett wrote:
> > From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> 
> Ah, got tired of copy/pasting for a number of patches, I see :)
> 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> > ---
> >  mm/huge_memory.c | 4 ++--
> >  mm/khugepaged.c  | 9 ++++++---
> >  2 files changed, 8 insertions(+), 5 deletions(-)
> > 
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index e5483347291c..f0f4ff5239ef 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -2270,11 +2270,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
> >  	split_huge_pmd_if_needed(vma, end);
> >  
> >  	/*
> > -	 * If we're also updating the vma->vm_next->vm_start,
> > +	 * If we're also updating the vma_next(vma)->vm_start,
> 
> vma_next() takes an iterator, not vma, though.

I will fix this comment to be less code-specific, since next isn't used
there anyway.

> 
> >  	 * check if we need to split it.
> >  	 */
> >  	if (adjust_next > 0) {
> > -		struct vm_area_struct *next = vma->vm_next;
> > +		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
> >  		unsigned long nstart = next->vm_start;
> >  		nstart += adjust_next;
> >  		split_huge_pmd_if_needed(next, nstart);
> > diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> > index 0ff7d72cdd1d..8f0633481791 100644
> > --- a/mm/khugepaged.c
> > +++ b/mm/khugepaged.c
> > @@ -2083,6 +2083,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
> >  	struct mm_struct *mm;
> >  	struct vm_area_struct *vma;
> >  	int progress = 0;
> > +	unsigned long address;
> >  
> >  	VM_BUG_ON(!pages);
> >  	lockdep_assert_held(&khugepaged_mm_lock);
> > @@ -2106,11 +2107,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
> >  	vma = NULL;
> >  	if (unlikely(!mmap_read_trylock(mm)))
> >  		goto breakouterloop_mmap_lock;
> > -	if (likely(!khugepaged_test_exit(mm)))
> > -		vma = find_vma(mm, khugepaged_scan.address);
> >  
> >  	progress++;
> > -	for (; vma; vma = vma->vm_next) {
> > +	if (unlikely(khugepaged_test_exit(mm)))
> > +		goto breakouterloop;
> > +
> > +	address = khugepaged_scan.address;
> > +	mt_for_each(&mm->mm_mt, vma, address, ULONG_MAX) {
> 
> Why not via mas_for_each()?

I will use for_each_vma() here.
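
For reference, a sketch of that rework, assuming the VMA_ITERATOR() and
for_each_vma() helpers from the merged maple tree API (roughly the shape the
loop took in later revisions):

	VMA_ITERATOR(vmi, mm, khugepaged_scan.address);

	/*
	 * for_each_vma() calls vma_next() until no VMA remains, replacing
	 * the old for (; vma; vma = vma->vm_next) walk.
	 */
	for_each_vma(vmi, vma) {
		unsigned long hstart, hend;

		cond_resched();
		/* ... */
	}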

> 
> >  		unsigned long hstart, hend;
> >  
> >  		cond_resched();
>

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e5483347291c..f0f4ff5239ef 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2270,11 +2270,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	split_huge_pmd_if_needed(vma, end);
 
 	/*
-	 * If we're also updating the vma->vm_next->vm_start,
+	 * If we're also updating the vma_next(vma)->vm_start,
 	 * check if we need to split it.
 	 */
 	if (adjust_next > 0) {
-		struct vm_area_struct *next = vma->vm_next;
+		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
 		unsigned long nstart = next->vm_start;
 		nstart += adjust_next;
 		split_huge_pmd_if_needed(next, nstart);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0ff7d72cdd1d..8f0633481791 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2083,6 +2083,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	int progress = 0;
+	unsigned long address;
 
 	VM_BUG_ON(!pages);
 	lockdep_assert_held(&khugepaged_mm_lock);
@@ -2106,11 +2107,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	vma = NULL;
 	if (unlikely(!mmap_read_trylock(mm)))
 		goto breakouterloop_mmap_lock;
-	if (likely(!khugepaged_test_exit(mm)))
-		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
-	for (; vma; vma = vma->vm_next) {
+	if (unlikely(khugepaged_test_exit(mm)))
+		goto breakouterloop;
+
+	address = khugepaged_scan.address;
+	mt_for_each(&mm->mm_mt, vma, address, ULONG_MAX) {
 		unsigned long hstart, hend;
 
 		cond_resched();
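
One semantic note on the huge_memory.c hunk above: find_vma(mm, addr) returns
the lowest VMA for which addr < vma->vm_end, so looking up vma->vm_end lands
on the VMA immediately after vma, which is exactly what the old vma->vm_next
link provided. A sketch of that reasoning:

	/*
	 * The comparison in find_vma() is strict (addr < vm_end), so passing
	 * vma->vm_end excludes vma itself and the lookup returns vma's
	 * successor, i.e. the old vma->vm_next.
	 */
	struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);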