diff mbox series

[1/2] mm/mlock: not handle NULL vma specially

Message ID 20220504003958.18402-1-richard.weiyang@gmail.com (mailing list archive)
State New
Headers show
Series [1/2] mm/mlock: not handle NULL vma specially | expand

Commit Message

Wei Yang May 4, 2022, 12:39 a.m. UTC
If we can't find a proper vma, the loop would terminate as expected.

It's not necessary to handle it specially.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 mm/mlock.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

Comments

Andrew Morton May 7, 2022, 9:03 p.m. UTC | #1
On Wed,  4 May 2022 00:39:57 +0000 Wei Yang <richard.weiyang@gmail.com> wrote:

> If we can't find a proper vma, the loop would terminate as expected.
> 
> It's not necessary to handle it specially.
> 
> ...
>
> --- a/mm/mlock.c
> +++ b/mm/mlock.c
> @@ -504,11 +504,7 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
>  	if (mm == NULL)
>  		mm = current->mm;
>  
> -	vma = find_vma(mm, start);
> -	if (vma == NULL)
> -		return 0;
> -
> -	for (; vma ; vma = vma->vm_next) {
> +	for (vma = find_vma(mm, start); vma ; vma = vma->vm_next) {
>  		if (start >= vma->vm_end)
>  			continue;
>  		if (start + len <=  vma->vm_start)

The mapletree patches mangle this code a lot.

Please take a look at linux-next or the mm-unstable branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm early to mid next
week, see if you see anything which should be addressed.
Wei Yang May 7, 2022, 9:56 p.m. UTC | #2
On Sun, May 8, 2022 at 5:03 AM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed,  4 May 2022 00:39:57 +0000 Wei Yang <richard.weiyang@gmail.com> wrote:
>
> > If we can't find a proper vma, the loop would terminate as expected.
> >
> > It's not necessary to handle it specially.
> >
> > ...
> >
> > --- a/mm/mlock.c
> > +++ b/mm/mlock.c
> > @@ -504,11 +504,7 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
> >       if (mm == NULL)
> >               mm = current->mm;
> >
> > -     vma = find_vma(mm, start);
> > -     if (vma == NULL)
> > -             return 0;
> > -
> > -     for (; vma ; vma = vma->vm_next) {
> > +     for (vma = find_vma(mm, start); vma ; vma = vma->vm_next) {
> >               if (start >= vma->vm_end)
> >                       continue;
> >               if (start + len <=  vma->vm_start)
>
> The mapletree patches mangle this code a lot.
>
> Please take a look at linux-next or the mm-unstabe branch at
> git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm early to mid next
> week, see if you see anything which should be addressed.
>

I took a look at the mm-unstable branch with last commit

  2b58b3f33ba2 mm/shmem: convert shmem_swapin_page() to shmem_swapin_folio()

Function count_mm_mlocked_page_nr() does not appear to have changed.

Do I need to rebase on top of it?
Andrew Morton May 7, 2022, 10:44 p.m. UTC | #3
On Sun, 8 May 2022 05:56:15 +0800 Wei Yang <richard.weiyang@gmail.com> wrote:

> > > -     vma = find_vma(mm, start);
> > > -     if (vma == NULL)
> > > -             return 0;
> > > -
> > > -     for (; vma ; vma = vma->vm_next) {
> > > +     for (vma = find_vma(mm, start); vma ; vma = vma->vm_next) {
> > >               if (start >= vma->vm_end)
> > >                       continue;
> > >               if (start + len <=  vma->vm_start)
> >
> > The mapletree patches mangle this code a lot.
> >
> > Please take a look at linux-next or the mm-unstabe branch at
> > git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm early to mid next
> > week, see if you see anything which should be addressed.
> >
> 
> I took a look at mm-unstabe branch with last commit
> 
>   2b58b3f33ba2 mm/shmem: convert shmem_swapin_page() to shmem_swapin_folio()
> 

It isn't early to mid next week yet ;)

> Function count_mm_mlocked_page_nr() looks not changed.
> 
> Do I need to rebase on top of it?

/*
 * Count the number of pages in [start, start + len) that lie inside
 * VM_LOCKED VMAs of @mm, returned as a page count (bytes >> PAGE_SHIFT).
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	/*
	 * NOTE(review): vmi is initialized with the incoming @mm *before*
	 * the NULL fallback below; if a caller can actually pass mm == NULL,
	 * the iterator would be set up on a NULL mm — verify callers.
	 */
	VMA_ITERATOR(vmi, mm, start);

	if (mm == NULL)
		mm = current->mm;

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;
	/* Walk only VMAs overlapping [start, end). */
	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			/*
			 * Range starts inside this VMA: pre-subtract the
			 * leading portion outside the range.  count may
			 * temporarily wrap (unsigned), but the subsequent
			 * addition of the VMA span compensates exactly.
			 */
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			/* Range ends inside this VMA: count the overlap and stop. */
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			/* VMA fully covered (from start clamp onward). */
			count += vma->vm_end - vma->vm_start;
		}
	}

	/* Convert the byte total into a number of pages. */
	return count >> PAGE_SHIFT;
}
diff mbox series

Patch

diff --git a/mm/mlock.c b/mm/mlock.c
index efd2dd2943de..0b7cf7d60922 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -504,11 +504,7 @@  static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
 	if (mm == NULL)
 		mm = current->mm;
 
-	vma = find_vma(mm, start);
-	if (vma == NULL)
-		return 0;
-
-	for (; vma ; vma = vma->vm_next) {
+	for (vma = find_vma(mm, start); vma ; vma = vma->vm_next) {
 		if (start >= vma->vm_end)
 			continue;
 		if (start + len <=  vma->vm_start)