
[v4,56/66] mm/mlock: Use maple tree iterators instead of vma linked list

Message ID: 20211201142918.921493-57-Liam.Howlett@oracle.com
State: New
Series: Introducing the Maple Tree

Commit Message

Liam R. Howlett Dec. 1, 2021, 2:30 p.m. UTC
From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/mlock.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

Comments

Vlastimil Babka Jan. 20, 2022, 12:16 p.m. UTC | #1
On 12/1/21 15:30, Liam Howlett wrote:
> From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> 
> Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> ---
>  mm/mlock.c | 19 +++++++++----------
>  1 file changed, 9 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/mlock.c b/mm/mlock.c
> index e263d62ae2d0..feb691eb4c64 100644
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -563,6 +563,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
>  	unsigned long nstart, end, tmp;
>  	struct vm_area_struct *vma, *prev;
>  	int error;
> +	MA_STATE(mas, &current->mm->mm_mt, start, start);
>  
>  	VM_BUG_ON(offset_in_page(start));
>  	VM_BUG_ON(len != PAGE_ALIGN(len));
> @@ -571,11 +572,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
>  		return -EINVAL;
>  	if (end == start)
>  		return 0;
> -	vma = find_vma(current->mm, start);
> -	if (!vma || vma->vm_start > start)
> +	vma = mas_walk(&mas);
> +	if (!vma)
>  		return -ENOMEM;
>  
> -	prev = vma->vm_prev;
> +	prev = mas_prev(&mas, 0);

Could this be done only as an 'else' of the 'if' below?

>  	if (start > vma->vm_start)
>  		prev = vma;
>  
> @@ -597,7 +598,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
>  		if (nstart >= end)
>  			break;
>  
> -		vma = prev->vm_next;
> +		vma = find_vma(prev->vm_mm, prev->vm_end);
>  		if (!vma || vma->vm_start != nstart) {
>  			error = -ENOMEM;
>  			break;
> @@ -618,15 +619,12 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
>  {
>  	struct vm_area_struct *vma;
>  	unsigned long count = 0;
> +	MA_STATE(mas, &mm->mm_mt, start, start);
>  
>  	if (mm == NULL)
>  		mm = current->mm;
>  
> -	vma = find_vma(mm, start);
> -	if (vma == NULL)
> -		return 0;
> -
> -	for (; vma ; vma = vma->vm_next) {
> +	mas_for_each(&mas, vma, start + len) {

Could be for_each_vma_range()?

>  		if (start >= vma->vm_end)
>  			continue;

Unnecessary? (even before this patch, I think?)

>  		if (start + len <=  vma->vm_start)

Unnecessary after your patch?

> @@ -741,6 +739,7 @@ static int apply_mlockall_flags(int flags)
>  {
>  	struct vm_area_struct *vma, *prev = NULL;
>  	vm_flags_t to_add = 0;
> +	unsigned long addr = 0;
>  
>  	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
>  	if (flags & MCL_FUTURE) {
> @@ -759,7 +758,7 @@ static int apply_mlockall_flags(int flags)
>  			to_add |= VM_LOCKONFAULT;
>  	}
>  
> -	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
> +	mt_for_each(&current->mm->mm_mt, vma, addr, ULONG_MAX) {
>  		vm_flags_t newflags;
>  
>  		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
Liam R. Howlett Jan. 26, 2022, 4:41 p.m. UTC | #2
* Vlastimil Babka <vbabka@suse.cz> [220120 07:16]:
> On 12/1/21 15:30, Liam Howlett wrote:
> > From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
> > 
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
> > ---
> >  mm/mlock.c | 19 +++++++++----------
> >  1 file changed, 9 insertions(+), 10 deletions(-)
> > 
> > diff --git a/mm/mlock.c b/mm/mlock.c
> > index e263d62ae2d0..feb691eb4c64 100644
> > --- a/mm/mlock.c
> > +++ b/mm/mlock.c
> > @@ -563,6 +563,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
> >  	unsigned long nstart, end, tmp;
> >  	struct vm_area_struct *vma, *prev;
> >  	int error;
> > +	MA_STATE(mas, &current->mm->mm_mt, start, start);
> >  
> >  	VM_BUG_ON(offset_in_page(start));
> >  	VM_BUG_ON(len != PAGE_ALIGN(len));
> > @@ -571,11 +572,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
> >  		return -EINVAL;
> >  	if (end == start)
> >  		return 0;
> > -	vma = find_vma(current->mm, start);
> > -	if (!vma || vma->vm_start > start)
> > +	vma = mas_walk(&mas);
> > +	if (!vma)
> >  		return -ENOMEM;
> >  
> > -	prev = vma->vm_prev;
> > +	prev = mas_prev(&mas, 0);
> 
> Could this be done only as an 'else' of the 'if' below?

Agreed.  I will make that change.
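
For reference, a minimal sketch of how that could look (illustrative
only; the actual v5 change may differ):

        vma = mas_walk(&mas);           /* VMA containing 'start', or NULL */
        if (!vma)
                return -ENOMEM;

        if (start > vma->vm_start)
                prev = vma;                     /* range starts inside vma */
        else
                prev = mas_prev(&mas, 0);       /* start == vma->vm_start */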

> 
> >  	if (start > vma->vm_start)
> >  		prev = vma;
> >  
> > @@ -597,7 +598,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
> >  		if (nstart >= end)
> >  			break;
> >  
> > -		vma = prev->vm_next;
> > +		vma = find_vma(prev->vm_mm, prev->vm_end);
> >  		if (!vma || vma->vm_start != nstart) {
> >  			error = -ENOMEM;
> >  			break;
> > @@ -618,15 +619,12 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
> >  {
> >  	struct vm_area_struct *vma;
> >  	unsigned long count = 0;
> > +	MA_STATE(mas, &mm->mm_mt, start, start);
> >  
> >  	if (mm == NULL)
> >  		mm = current->mm;
> >  
> > -	vma = find_vma(mm, start);
> > -	if (vma == NULL)
> > -		return 0;
> > -
> > -	for (; vma ; vma = vma->vm_next) {
> > +	mas_for_each(&mas, vma, start + len) {
> 
> Could be for_each_vma_range()?

Yes, I will do this.
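
As I understand it, for_each_vma_range() is a thin VMA-flavoured wrapper
over the same maple tree iteration; roughly the shape below (both the
macro body and the vma_find() helper name are my reading of what the
series provides, not quoted from it):

/* Assumed shape: a struct vma_iterator wraps a ma_state, and the macro
 * walks VMAs up to an end address via a vma_find() helper, much as
 * mas_for_each() walks raw tree entries up to a maximum index.
 */
#define for_each_vma_range(__vmi, __vma, __end)                 \
        while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)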

> 
> >  		if (start >= vma->vm_end)
> >  			continue;
> 
> Unnecessary? (even before this patch, I think?)

I think so too; I will remove it.

> 
> >  		if (start + len <=  vma->vm_start)
> 
> Unnecessary after your patch?

This appears to be here for overflow.  As written, my patch does not
handle overflow the way it is handled today.  I will update my patch to
handle overflow in the same way by moving this check out of the loop and
setting up an end variable.  This has the added benefit of turning a
per-iteration check into a one-time one.  I don't love the fact that
overflow is handled like this; perhaps it should be revisited at a later
date.
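
Roughly, the function could end up shaped like this (sketch only: the
VMA_ITERATOR()/for_each_vma_range() names follow the series, clamping a
wrapped range to ULONG_MAX is an assumption here rather than the decided
behaviour, and the NULL-mm fallback in the current code is omitted for
brevity):

static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
                unsigned long start, size_t len)
{
        struct vm_area_struct *vma;
        unsigned long count = 0;
        unsigned long end;
        VMA_ITERATOR(vmi, mm, start);

        /* Compute the end once instead of re-checking it per iteration;
         * how a wrapped 'start + len' should be treated is still open
         * (clamping, via check_add_overflow() from <linux/overflow.h>,
         * is only one option).
         */
        if (check_add_overflow(start, len, &end))
                end = ULONG_MAX;

        for_each_vma_range(vmi, vma, end) {
                if (!(vma->vm_flags & VM_LOCKED))
                        continue;
                if (start > vma->vm_start)
                        count -= (start - vma->vm_start);
                if (end < vma->vm_end) {
                        count += end - vma->vm_start;
                        break;
                }
                count += vma->vm_end - vma->vm_start;
        }

        return count >> PAGE_SHIFT;
}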

> 
> > @@ -741,6 +739,7 @@ static int apply_mlockall_flags(int flags)
> >  {
> >  	struct vm_area_struct *vma, *prev = NULL;
> >  	vm_flags_t to_add = 0;
> > +	unsigned long addr = 0;
> >  
> >  	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
> >  	if (flags & MCL_FUTURE) {
> > @@ -759,7 +758,7 @@ static int apply_mlockall_flags(int flags)
> >  			to_add |= VM_LOCKONFAULT;
> >  	}
> >  
> > -	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
> > +	mt_for_each(&current->mm->mm_mt, vma, addr, ULONG_MAX) {

and I'll add a for_each_vma() here.
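
Something along these lines (sketch; the 'addr' cursor added above would
presumably go away, since the iterator carries the position):

        struct vm_area_struct *vma, *prev = NULL;
        vm_flags_t to_add = 0;
        VMA_ITERATOR(vmi, current->mm, 0);

        /* ... flag setup unchanged ... */

        for_each_vma(vmi, vma) {
                vm_flags_t newflags;

                newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
                /* ... rest of the loop body unchanged ... */
        }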

> >  		vm_flags_t newflags;
> >  
> >  		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
>

Patch

diff --git a/mm/mlock.c b/mm/mlock.c
index e263d62ae2d0..feb691eb4c64 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -563,6 +563,7 @@  static int apply_vma_lock_flags(unsigned long start, size_t len,
 	unsigned long nstart, end, tmp;
 	struct vm_area_struct *vma, *prev;
 	int error;
+	MA_STATE(mas, &current->mm->mm_mt, start, start);
 
 	VM_BUG_ON(offset_in_page(start));
 	VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -571,11 +572,11 @@  static int apply_vma_lock_flags(unsigned long start, size_t len,
 		return -EINVAL;
 	if (end == start)
 		return 0;
-	vma = find_vma(current->mm, start);
-	if (!vma || vma->vm_start > start)
+	vma = mas_walk(&mas);
+	if (!vma)
 		return -ENOMEM;
 
-	prev = vma->vm_prev;
+	prev = mas_prev(&mas, 0);
 	if (start > vma->vm_start)
 		prev = vma;
 
@@ -597,7 +598,7 @@  static int apply_vma_lock_flags(unsigned long start, size_t len,
 		if (nstart >= end)
 			break;
 
-		vma = prev->vm_next;
+		vma = find_vma(prev->vm_mm, prev->vm_end);
 		if (!vma || vma->vm_start != nstart) {
 			error = -ENOMEM;
 			break;
@@ -618,15 +619,12 @@  static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
 {
 	struct vm_area_struct *vma;
 	unsigned long count = 0;
+	MA_STATE(mas, &mm->mm_mt, start, start);
 
 	if (mm == NULL)
 		mm = current->mm;
 
-	vma = find_vma(mm, start);
-	if (vma == NULL)
-		return 0;
-
-	for (; vma ; vma = vma->vm_next) {
+	mas_for_each(&mas, vma, start + len) {
 		if (start >= vma->vm_end)
 			continue;
 		if (start + len <=  vma->vm_start)
@@ -741,6 +739,7 @@  static int apply_mlockall_flags(int flags)
 {
 	struct vm_area_struct *vma, *prev = NULL;
 	vm_flags_t to_add = 0;
+	unsigned long addr = 0;
 
 	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
 	if (flags & MCL_FUTURE) {
@@ -759,7 +758,7 @@  static int apply_mlockall_flags(int flags)
 			to_add |= VM_LOCKONFAULT;
 	}
 
-	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+	mt_for_each(&current->mm->mm_mt, vma, addr, ULONG_MAX) {
 		vm_flags_t newflags;
 
 		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
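
For readers new to the maple tree calls used above, a rough summary of
the lookup pattern this patch relies on (semantics as understood from the
series; treat as illustrative rather than authoritative):

        /* Callers in mlock.c hold mmap_lock, which these walks rely on. */
        MA_STATE(mas, &mm->mm_mt, start, start);  /* cursor positioned at 'start' */
        struct vm_area_struct *vma, *prev;

        vma = mas_walk(&mas);           /* VMA whose range contains 'start', or NULL */
        prev = mas_prev(&mas, 0);       /* step the same cursor back one VMA (min index 0) */

        mas_set(&mas, start);           /* reposition the cursor before iterating again */
        mas_for_each(&mas, vma, end) {  /* each VMA overlapping [start, end], 'end' inclusive */
                /* ... */
        }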