
[1/1] mm/khugepaged: fix vm_lock/i_mmap_rwsem inversion in retract_page_tables

Message ID 20230303213250.3555716-1-surenb@google.com (mailing list archive)
State New
Series [1/1] mm/khugepaged: fix vm_lock/i_mmap_rwsem inversion in retract_page_tables

Commit Message

Suren Baghdasaryan March 3, 2023, 9:32 p.m. UTC
An internal syzkaller run on linux-next reported a lock inversion caused by
vm_lock being taken after i_mmap_rwsem:

======================================================
WARNING: possible circular locking dependency detected
6.2.0-next-20230301-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor115/5084 is trying to acquire lock:
ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: vma_start_write include/linux/mm.h:678 [inline]
ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: retract_page_tables mm/khugepaged.c:1826 [inline]
ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: collapse_file+0x4fa5/0x5980 mm/khugepaged.c:2204

but task is already holding lock:
ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: i_mmap_lock_write include/linux/fs.h:468 [inline]
ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: retract_page_tables mm/khugepaged.c:1745 [inline]
ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: collapse_file+0x3da6/0x5980 mm/khugepaged.c:2204

retract_page_tables() takes i_mmap_rwsem before the exclusive mmap_lock, which
is the inverse of the normal order. Deadlock is avoided by try-locking
mmap_lock and skipping the mm on failure to obtain it. Locking the VMA should
use the same trylock-and-skip pattern to avoid this lock inversion.
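
To illustrate the pattern outside the kernel, here is a minimal userspace
sketch (illustrative only: pthread mutexes stand in for i_mmap_rwsem and
vma->vm_lock, and the function names are invented for the example, not taken
from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_mmap_rwsem = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Normal order: take vm_lock first, then i_mmap_rwsem. */
static void *fault_path(void *arg)
{
	pthread_mutex_lock(&vm_lock);
	pthread_mutex_lock(&i_mmap_rwsem);
	/* ... do work under both locks ... */
	pthread_mutex_unlock(&i_mmap_rwsem);
	pthread_mutex_unlock(&vm_lock);
	return NULL;
}

/*
 * Inverse order: this path already holds i_mmap_rwsem, so it must only
 * try-lock vm_lock and skip the work on failure, never block on it.
 */
static void *retract_path(void *arg)
{
	pthread_mutex_lock(&i_mmap_rwsem);
	if (pthread_mutex_trylock(&vm_lock) == 0) {
		/* ... do work under both locks ... */
		pthread_mutex_unlock(&vm_lock);
	} else {
		puts("vm_lock contended, skipping");
	}
	pthread_mutex_unlock(&i_mmap_rwsem);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault_path, NULL);
	pthread_create(&b, NULL, retract_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Because the retract path never blocks on the second lock while holding the
first, the ABBA cycle reported by lockdep cannot form; at worst the work is
skipped, mirroring how retract_page_tables() skips a VMA it cannot
immediately lock.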

Fixes: 44a83f2083bd ("mm/khugepaged: write-lock VMA while collapsing a huge page")
Reported-by: syzbot+8955a9646d1a48b8be92@syzkaller.appspotmail.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/mm.h | 39 ++++++++++++++++++++++++++++-----------
 mm/khugepaged.c    |  5 ++++-
 2 files changed, 32 insertions(+), 12 deletions(-)

Comments

Suren Baghdasaryan March 4, 2023, 11:25 p.m. UTC | #1
On Fri, Mar 3, 2023 at 1:32 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> An internal syzkaller run on linux-next reported a lock inversion caused by
> vm_lock being taken after i_mmap_rwsem:
>
> ======================================================
> WARNING: possible circular locking dependency detected
> 6.2.0-next-20230301-syzkaller #0 Not tainted
> ------------------------------------------------------
> syz-executor115/5084 is trying to acquire lock:
> ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: vma_start_write include/linux/mm.h:678 [inline]
> ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: retract_page_tables mm/khugepaged.c:1826 [inline]
> ffff888078307a90 (&vma->vm_lock->lock){++++}-{3:3}, at: collapse_file+0x4fa5/0x5980 mm/khugepaged.c:2204
>
> but task is already holding lock:
> ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: i_mmap_lock_write include/linux/fs.h:468 [inline]
> ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: retract_page_tables mm/khugepaged.c:1745 [inline]
> ffff88801f93efa8 (&mapping->i_mmap_rwsem){++++}-{3:3}, at: collapse_file+0x3da6/0x5980 mm/khugepaged.c:2204
>
> retract_page_tables() takes i_mmap_rwsem before the exclusive mmap_lock, which
> is the inverse of the normal order. Deadlock is avoided by try-locking
> mmap_lock and skipping the mm on failure to obtain it. Locking the VMA should
> use the same trylock-and-skip pattern to avoid this lock inversion.
>
> Fixes: 44a83f2083bd ("mm/khugepaged: write-lock VMA while collapsing a huge page")
> Reported-by: syzbot+8955a9646d1a48b8be92@syzkaller.appspotmail.com
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  include/linux/mm.h | 39 ++++++++++++++++++++++++++++-----------
>  mm/khugepaged.c    |  5 ++++-
>  2 files changed, 32 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 1b9be34a24fb..5f16263d176d 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -676,18 +676,23 @@ static inline void vma_end_read(struct vm_area_struct *vma)
>         rcu_read_unlock();
>  }
>
> -static inline void vma_start_write(struct vm_area_struct *vma)
> +static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
>  {
> -       int mm_lock_seq;
> -
>         mmap_assert_write_locked(vma->vm_mm);
>
>         /*
>          * current task is holding mmap_write_lock, both vma->vm_lock_seq and
>          * mm->mm_lock_seq can't be concurrently modified.
>          */
> -       mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
> -       if (vma->vm_lock_seq == mm_lock_seq)
> +       *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
> +       return (vma->vm_lock_seq == *mm_lock_seq);
> +}
> +
> +static inline void vma_start_write(struct vm_area_struct *vma)
> +{
> +       int mm_lock_seq;
> +
> +       if (__is_vma_write_locked(vma, &mm_lock_seq))
>                 return;
>
>         down_write(&vma->vm_lock->lock);
> @@ -695,14 +700,26 @@ static inline void vma_start_write(struct vm_area_struct *vma)
>         up_write(&vma->vm_lock->lock);
>  }
>
> +static inline bool vma_try_start_write(struct vm_area_struct *vma)
> +{
> +       int mm_lock_seq;
> +
> +       if (__is_vma_write_locked(vma, &mm_lock_seq))
> +               return true;
> +
> +       if (!down_write_trylock(&vma->vm_lock->lock))
> +               return false;
> +
> +       vma->vm_lock_seq = mm_lock_seq;
> +       up_write(&vma->vm_lock->lock);
> +       return true;
> +}
> +
>  static inline void vma_assert_write_locked(struct vm_area_struct *vma)
>  {
> -       mmap_assert_write_locked(vma->vm_mm);
> -       /*
> -        * current task is holding mmap_write_lock, both vma->vm_lock_seq and
> -        * mm->mm_lock_seq can't be concurrently modified.
> -        */
> -       VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
> +       int mm_lock_seq;
> +
> +       VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
>  }

Hi Andrew,
I missed the vma_try_start_write() definition for the CONFIG_PER_VMA_LOCK=n
configuration. Could you please fold in the following fix?

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -740,6 +740,8 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
  { return false; }
 static inline void vma_end_read(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline bool vma_try_start_write(struct vm_area_struct *vma) { return true; }
 static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}

or should I send a separate patch?
Thanks,
Suren.


>
>  static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index c64e01f03f27..408fed42c9f5 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1795,6 +1795,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
>                 result = SCAN_PTE_MAPPED_HUGEPAGE;
>                 if ((cc->is_khugepaged || is_target) &&
>                     mmap_write_trylock(mm)) {
> +                       /* trylock for the same lock inversion as above */
> +                       if (!vma_try_start_write(vma))
> +                               goto unlock_next;
> +
>                         /*
>                          * Re-check whether we have an ->anon_vma, because
>                          * collapse_and_free_pmd() requires that either no
> @@ -1823,7 +1827,6 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
>                                 result = SCAN_PTE_UFFD_WP;
>                                 goto unlock_next;
>                         }
> -                       vma_start_write(vma);
>                         collapse_and_free_pmd(mm, vma, addr, pmd);
>                         if (!cc->is_khugepaged && is_target)
>                                 result = set_huge_pmd(vma, addr, pmd, hpage);
> --
> 2.40.0.rc0.216.gc4246ad0f0-goog
>

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1b9be34a24fb..5f16263d176d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -676,18 +676,23 @@ static inline void vma_end_read(struct vm_area_struct *vma)
 	rcu_read_unlock();
 }
 
-static inline void vma_start_write(struct vm_area_struct *vma)
+static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
 {
-	int mm_lock_seq;
-
 	mmap_assert_write_locked(vma->vm_mm);
 
 	/*
 	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
 	 * mm->mm_lock_seq can't be concurrently modified.
 	 */
-	mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
-	if (vma->vm_lock_seq == mm_lock_seq)
+	*mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+	return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+	int mm_lock_seq;
+
+	if (__is_vma_write_locked(vma, &mm_lock_seq))
 		return;
 
 	down_write(&vma->vm_lock->lock);
@@ -695,14 +700,26 @@ static inline void vma_start_write(struct vm_area_struct *vma)
 	up_write(&vma->vm_lock->lock);
 }
 
+static inline bool vma_try_start_write(struct vm_area_struct *vma)
+{
+	int mm_lock_seq;
+
+	if (__is_vma_write_locked(vma, &mm_lock_seq))
+		return true;
+
+	if (!down_write_trylock(&vma->vm_lock->lock))
+		return false;
+
+	vma->vm_lock_seq = mm_lock_seq;
+	up_write(&vma->vm_lock->lock);
+	return true;
+}
+
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
-	mmap_assert_write_locked(vma->vm_mm);
-	/*
-	 * current task is holding mmap_write_lock, both vma->vm_lock_seq and
-	 * mm->mm_lock_seq can't be concurrently modified.
-	 */
-	VM_BUG_ON_VMA(vma->vm_lock_seq != READ_ONCE(vma->vm_mm->mm_lock_seq), vma);
+	int mm_lock_seq;
+
+	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }
 
 static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c64e01f03f27..408fed42c9f5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1795,6 +1795,10 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
 		result = SCAN_PTE_MAPPED_HUGEPAGE;
 		if ((cc->is_khugepaged || is_target) &&
 		    mmap_write_trylock(mm)) {
+			/* trylock for the same lock inversion as above */
+			if (!vma_try_start_write(vma))
+				goto unlock_next;
+
 			/*
 			 * Re-check whether we have an ->anon_vma, because
 			 * collapse_and_free_pmd() requires that either no
@@ -1823,7 +1827,6 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
 				result = SCAN_PTE_UFFD_WP;
 				goto unlock_next;
 			}
-			vma_start_write(vma);
 			collapse_and_free_pmd(mm, vma, addr, pmd);
 			if (!cc->is_khugepaged && is_target)
 				result = set_huge_pmd(vma, addr, pmd, hpage);