
[v2,3/6] mm: replace mmap with vma write lock assertions when operating on a vma

Message ID 20230801220733.1987762-4-surenb@google.com (mailing list archive)
State New
Series make vma locking more obvious

Commit Message

Suren Baghdasaryan Aug. 1, 2023, 10:07 p.m. UTC
A vma write lock assertion always includes an mmap write lock assertion,
plus additional vma lock checks when per-VMA locks are enabled. Replace
the weaker mmap_assert_write_locked() assertions with the stronger
vma_assert_write_locked() ones when operating on a vma that is expected
to be locked.

Suggested-by: Jann Horn <jannh@google.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/hugetlb.c    | 2 +-
 mm/khugepaged.c | 5 +++--
 mm/memory.c     | 2 +-
 3 files changed, 5 insertions(+), 4 deletions(-)
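
[Editor's note] For readers skimming the diffstat, a minimal sketch of the
relationship the commit message describes, paraphrased from
include/linux/mm.h around the time of this series; exact names such as
vm_lock_seq may differ between kernel versions:

/* Sketch only -- paraphrased, not a literal copy of the kernel source. */
#ifdef CONFIG_PER_VMA_LOCK
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
	/* The stronger assertion implies the weaker one... */
	mmap_assert_write_locked(vma->vm_mm);
	/* ...and also checks that this specific vma was write-locked,
	 * i.e. vma_start_write() synced the vma's lock seqcount with
	 * the mm's. */
	VM_BUG_ON_VMA(vma->vm_lock_seq != vma->vm_mm->mm_lock_seq, vma);
}
#else /* !CONFIG_PER_VMA_LOCK */
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
	/* Without per-VMA locks the two assertions are equivalent. */
	mmap_assert_write_locked(vma->vm_mm);
}
#endif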

Comments

Liam R. Howlett Aug. 2, 2023, 5:13 p.m. UTC | #1
* Suren Baghdasaryan <surenb@google.com> [230801 18:07]:
> A vma write lock assertion always includes an mmap write lock assertion,
> plus additional vma lock checks when per-VMA locks are enabled. Replace
> the weaker mmap_assert_write_locked() assertions with the stronger
> vma_assert_write_locked() ones when operating on a vma that is expected
> to be locked.
> 
> Suggested-by: Jann Horn <jannh@google.com>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>


Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 64a3239b6407..1d871a1167d8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5028,7 +5028,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					src_vma->vm_start,
 					src_vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
-		mmap_assert_write_locked(src);
+		vma_assert_write_locked(src_vma);
 		raw_write_seqcount_begin(&src->write_protect_seq);
 	} else {
 		/*
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 78c8d5d8b628..1e43a56fba31 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1495,7 +1495,7 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
 	};
 
 	VM_BUG_ON(!PageTransHuge(hpage));
-	mmap_assert_write_locked(vma->vm_mm);
+	vma_assert_write_locked(vma);
 
 	if (do_set_pmd(&vmf, hpage))
 		return SCAN_FAIL;
@@ -1525,7 +1525,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
 	pmd_t pmd;
 	struct mmu_notifier_range range;
 
-	mmap_assert_write_locked(mm);
+	vma_assert_write_locked(vma);
 	if (vma->vm_file)
 		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
 	/*
@@ -1570,6 +1570,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	int count = 0, result = SCAN_FAIL;
 	int i;
 
+	/* Ensure vma can't change, it will be locked below after checks */
 	mmap_assert_write_locked(mm);
 
 	/* Fast check before locking page if already PMD-mapped */
diff --git a/mm/memory.c b/mm/memory.c
index 603b2f419948..652d99b9858a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1312,7 +1312,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
 		 * Use the raw variant of the seqcount_t write API to avoid
 		 * lockdep complaining about preemptibility.
 		 */
-		mmap_assert_write_locked(src_mm);
+		vma_assert_write_locked(src_vma);
 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
 	}
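
[Editor's note] The collapse_pte_mapped_thp() hunk above relies on the
caller-side pattern its new comment hints at: the mmap write lock keeps
the vma stable while the checks run, and the vma itself is write-locked
before collapse_and_free_pmd() asserts it. An illustrative outline under
those assumptions (not the actual function body; the helper names are
real kernel APIs but the surrounding logic is heavily simplified):

/* Illustrative sketch of the caller-side locking pattern; not the
 * real collapse_pte_mapped_thp(). */
static int collapse_pte_mapped_thp_sketch(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmd)
{
	struct vm_area_struct *vma;

	/* Ensure the vma can't change; it will be vma-locked below
	 * after the checks (the comment added by this patch). */
	mmap_assert_write_locked(mm);

	vma = vma_lookup(mm, addr);
	if (!vma)
		return SCAN_VMA_NULL;

	/* ... page table and page checks run here ... */

	/* Write-lock this specific vma so page faults taken under
	 * per-VMA locks can't race with the PMD collapse. */
	vma_start_write(vma);

	/* Now satisfies the stronger vma_assert_write_locked(). */
	collapse_and_free_pmd(mm, vma, addr, pmd);
	return SCAN_SUCCEED;
}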