diff mbox

mm: another VM_BUG_ON_PAGE(PageTail(page))

Message ID 20160129123544.GB146512@black.fi.intel.com (mailing list archive)
State Not Applicable, archived
Headers show

Commit Message

Kirill A. Shutemov Jan. 29, 2016, 12:35 p.m. UTC
From 691a961bb401c5815ed741dac63591efbc6027e3 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Date: Fri, 29 Jan 2016 15:06:17 +0300
Subject: [PATCH 2/2] mempolicy: do not try to queue pages from
 !vma_migratable()

Maybe I miss some point, but I don't see a reason why we try to queue
pages from non migratable VMAs.

The only case when we can queue pages from such VMA is MPOL_MF_STRICT
plus MPOL_MF_MOVE or MPOL_MF_MOVE_ALL for VMA which has pages on LRU,
but gfp mask is not suitable for migration (see mapping_gfp_mask() check
in vma_migratable()). That looks like a bug to me.

Let's filter out non-migratable vma at start of queue_pages_test_walk()
and go to queue_pages_pte_range() only if MPOL_MF_MOVE or
MPOL_MF_MOVE_ALL flag is set.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 mm/mempolicy.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

Comments

Dmitry Vyukov Feb. 1, 2016, 10:48 a.m. UTC | #1
On Fri, Jan 29, 2016 at 1:35 PM, Kirill A. Shutemov
<kirill.shutemov@linux.intel.com> wrote:
> From 691a961bb401c5815ed741dac63591efbc6027e3 Mon Sep 17 00:00:00 2001
> From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
> Date: Fri, 29 Jan 2016 15:06:17 +0300
> Subject: [PATCH 2/2] mempolicy: do not try to queue pages from
>  !vma_migratable()
>
> Maybe I miss some point, but I don't see a reason why we try to queue
> pages from non migratable VMAs.
>
> The only case when we can queue pages from such VMA is MPOL_MF_STRICT
> plus MPOL_MF_MOVE or MPOL_MF_MOVE_ALL for VMA which has pages on LRU,
> but gfp mask is not suitable for migration (see mapping_gfp_mask() check
> in vma_migratable()). That looks like a bug to me.
>
> Let's filter out non-migratable vma at start of queue_pages_test_walk()
> and go to queue_pages_pte_range() only if MPOL_MF_MOVE or
> MPOL_MF_MOVE_ALL flag is set.


I've run the fuzzer with these two patches for the weekend and seen no crashes.
I guess we can consider this as fixed.
Thanks!


> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  mm/mempolicy.c | 14 +++++---------
>  1 file changed, 5 insertions(+), 9 deletions(-)
>
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 27d135408a22..4c4187c0e1de 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -548,8 +548,7 @@ retry:
>                         goto retry;
>                 }
>
> -               if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
> -                       migrate_page_add(page, qp->pagelist, flags);
> +               migrate_page_add(page, qp->pagelist, flags);
>         }
>         pte_unmap_unlock(pte - 1, ptl);
>         cond_resched();
> @@ -625,7 +624,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
>         unsigned long endvma = vma->vm_end;
>         unsigned long flags = qp->flags;
>
> -       if (vma->vm_flags & VM_PFNMAP)
> +       if (!vma_migratable(vma))
>                 return 1;
>
>         if (endvma > end)
> @@ -644,16 +643,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
>
>         if (flags & MPOL_MF_LAZY) {
>                 /* Similar to task_numa_work, skip inaccessible VMAs */
> -               if (vma_migratable(vma) &&
> -                       vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
> +               if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
>                         change_prot_numa(vma, start, endvma);
>                 return 1;
>         }
>
> -       if ((flags & MPOL_MF_STRICT) ||
> -           ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
> -            vma_migratable(vma)))
> -               /* queue pages from current vma */
> +       /* queue pages from current vma */
> +       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
>                 return 0;
>         return 1;
>  }
> --
> 2.7.0.rc3
>
> --
> You received this message because you are subscribed to the Google Groups "syzkaller" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to syzkaller+unsubscribe@googlegroups.com.
> For more options, visit https://groups.google.com/d/optout.
--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 27d135408a22..4c4187c0e1de 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -548,8 +548,7 @@  retry:
 			goto retry;
 		}
 
-		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-			migrate_page_add(page, qp->pagelist, flags);
+		migrate_page_add(page, qp->pagelist, flags);
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
@@ -625,7 +624,7 @@  static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	unsigned long endvma = vma->vm_end;
 	unsigned long flags = qp->flags;
 
-	if (vma->vm_flags & VM_PFNMAP)
+	if (!vma_migratable(vma))
 		return 1;
 
 	if (endvma > end)
@@ -644,16 +643,13 @@  static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
 	if (flags & MPOL_MF_LAZY) {
 		/* Similar to task_numa_work, skip inaccessible VMAs */
-		if (vma_migratable(vma) &&
-			vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
 			change_prot_numa(vma, start, endvma);
 		return 1;
 	}
 
-	if ((flags & MPOL_MF_STRICT) ||
-	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
-	     vma_migratable(vma)))
-		/* queue pages from current vma */
+	/* queue pages from current vma */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 		return 0;
 	return 1;
 }