
[v6] hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing

Message ID: 20221031223440.285187-1-mike.kravetz@oracle.com (mailing list archive)
State: New
Series: [v6] hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing

Commit Message

Mike Kravetz Oct. 31, 2022, 10:34 p.m. UTC
madvise(MADV_DONTNEED) ends up calling zap_page_range() to clear the page
tables associated with the address range.  For hugetlb vmas,
zap_page_range will call __unmap_hugepage_range_final.  However,
__unmap_hugepage_range_final assumes the passed vma is about to be removed
and deletes the vma_lock to prevent pmd sharing as the vma is on the way
out.  In the case of madvise(MADV_DONTNEED) the vma remains, but the
missing vma_lock prevents pmd sharing and could potentially lead to issues
with truncation/fault races.
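
For reference, the madvise(MADV_DONTNEED) path in question reduces to
roughly the following (a simplified sketch of the mm/madvise.c helper,
not the exact source):

	static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
	{
		/* hugetlb and normal vmas take the same zap path, so a
		 * hugetlb vma reaches __unmap_hugepage_range_final() even
		 * though the vma itself survives the call */
		zap_page_range(vma, start, end - start);
		return 0;
	}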

This issue was originally reported here [1] as a BUG triggered in
page_try_dup_anon_rmap.  Prior to the introduction of the hugetlb
vma_lock, __unmap_hugepage_range_final cleared the VM_MAYSHARE flag to
prevent pmd sharing.  Subsequent faults on this vma were confused:
VM_MAYSHARE indicates a sharable vma, but it was no longer set, so
page_mapping was not set in new pages added to the page table.  This
resulted in pages that appeared anonymous in a VM_SHARED vma and
triggered the BUG.
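
The pre-vma_lock code being described looked roughly like this (a
condensed sketch of the old routine, not the verbatim removed code):

	void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
	{
		__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);

		/*
		 * Clear this flag so the pmd-sharing shareability test fails
		 * on a vma being torn down -- correct for a final unmap, but
		 * wrong for MADV_DONTNEED where the vma lives on.
		 */
		vma->vm_flags &= ~VM_MAYSHARE;
	}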

Create a new routine clear_hugetlb_page_range() that can be called from
madvise(MADV_DONTNEED) for hugetlb vmas.  It has the same setup as
zap_page_range, but does not delete the vma_lock.  Also, add a new zap
flag ZAP_FLAG_UNMAP to indicate an unmap call from unmap_vmas().  This
is used to indicate the 'final' unmapping of a vma.  The routine
__unmap_hugepage_range is modified to take a notification_needed
argument, which is used to prevent duplicate notifications.
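
With those pieces, the madvise(MADV_DONTNEED) path can pick the new
routine along these lines (a hypothetical caller sketch, not part of the
diff below):

	if (is_vm_hugetlb_page(vma))
		clear_hugetlb_page_range(vma, start, end);	/* vma_lock kept */
	else
		zap_page_range(vma, start, end - start);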

[1] https://lore.kernel.org/lkml/CAO4mrfdLMXsao9RF4fUE8-Wfde8xmjsKrTNMNC9wjUb6JudD0g@mail.gmail.com/
Fixes: 90e7e7f5ef3f ("mm: enable MADV_DONTNEED for hugetlb mappings")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Wei Chen <harperchen1110@gmail.com>
Cc: <stable@vger.kernel.org>
---
 include/linux/hugetlb.h |  7 ++++
 include/linux/mm.h      |  3 ++
 mm/hugetlb.c            | 80 ++++++++++++++++++++++++++++++-----------
 mm/memory.c             | 18 ++++++----
 4 files changed, 82 insertions(+), 26 deletions(-)

Comments

Nadav Amit Nov. 1, 2022, 12:54 a.m. UTC | #1
On Oct 31, 2022, at 3:34 PM, Mike Kravetz <mike.kravetz@oracle.com> wrote:

> madvise(MADV_DONTNEED) ends up calling zap_page_range() to clear the page
> tables associated with the address range.  For hugetlb vmas,
> zap_page_range will call __unmap_hugepage_range_final.  However,
> __unmap_hugepage_range_final assumes the passed vma is about to be removed
> and deletes the vma_lock to prevent pmd sharing as the vma is on the way
> out.  In the case of madvise(MADV_DONTNEED) the vma remains, but the
> missing vma_lock prevents pmd sharing and could potentially lead to issues
> with truncation/fault races.
> 

[snip]

> index 978c17df053e..517c8cc8ccb9 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3464,4 +3464,7 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
>  */
> #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
> 
> +/* Set in unmap_vmas() to indicate an unmap call.  Only used by hugetlb */
> +#define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))

PeterZ wants to add ZAP_FLAG_FORCE_FLUSH that would be set on
zap_pte_range(). Not sure you would want to combine them both together, but
at least be aware of potential conflict.

https://lore.kernel.org/all/Y1f7YvKuwOl1XEwU@hirez.programming.kicks-ass.net/

[snip]

> +#ifdef CONFIG_ADVISE_SYSCALLS
> +/*
> + * Similar setup as in zap_page_range().  madvise(MADV_DONTNEED) can not call
> + * zap_page_range for hugetlb vmas as __unmap_hugepage_range_final will delete
> + * the associated vma_lock.
> + */
> +void clear_hugetlb_page_range(struct vm_area_struct *vma, unsigned long start,
> +				unsigned long end)
> +{
> +	struct mmu_notifier_range range;
> +	struct mmu_gather tlb;
> +
> +	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
> +				start, end);
> +	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
> +	tlb_gather_mmu(&tlb, vma->vm_mm);
> +	update_hiwater_rss(vma->vm_mm);
> +	mmu_notifier_invalidate_range_start(&range);
> +
> +	__unmap_hugepage_range_locking(&tlb, vma, start, end, NULL, 0);
> +
> +	mmu_notifier_invalidate_range_end(&range);
> +	tlb_finish_mmu(&tlb);
> }
> +#endif

I hate ifdef’s. And the second definition of clear_hugetlb_page_range() is
confusing since it does not have an ifdef at all. How about moving the
ifdef’s into the function, as is done in io_madvise_prep()? I think it
is less confusing.
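
Something like the following shape, with a single definition (a sketch of
the io_madvise_prep() style, not code from the patch):

	void clear_hugetlb_page_range(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
	{
	#ifdef CONFIG_ADVISE_SYSCALLS
		/* ... body as in the patch below ... */
	#endif
	}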

[ snip ]

> 
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1671,7 +1671,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
> {
> 	struct mmu_notifier_range range;
> 	struct zap_details details = {
> -		.zap_flags = ZAP_FLAG_DROP_MARKER,
> +		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
> 		/* Careful - we need to zap private pages too! */
> 		.even_cows = true,
> 	};
> @@ -1704,15 +1704,21 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
> 	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
> 
> 	lru_add_drain();
> -	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
> -				start, start + size);
> 	tlb_gather_mmu(&tlb, vma->vm_mm);
> 	update_hiwater_rss(vma->vm_mm);
> -	mmu_notifier_invalidate_range_start(&range);
> 	do {
> -		unmap_single_vma(&tlb, vma, start, range.end, NULL);
> +		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
> +				vma->vm_mm,
> +				max(start, vma->vm_start),
> +				min(start + size, vma->vm_end));
> +		if (is_vm_hugetlb_page(vma))
> +			adjust_range_if_pmd_sharing_possible(vma,
> +				&range.start,
> +				&range.end);
> +		mmu_notifier_invalidate_range_start(&range);
> +		unmap_single_vma(&tlb, vma, start, start + size, NULL);

Is there a reason that you wouldn’t use range.start and range.end here?
At least for consistency.
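
Concretely, that would read (spelling out the suggestion, not quoting the
patch):

	unmap_single_vma(&tlb, vma, range.start, range.end, NULL);

though note that range.start/range.end may have been widened by
adjust_range_if_pmd_sharing_possible() at this point, so they are not
necessarily the same as the clamped zap bounds.
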
Mike Kravetz Nov. 1, 2022, 1:48 a.m. UTC | #2
On 10/31/22 17:54, Nadav Amit wrote:
> On Oct 31, 2022, at 3:34 PM, Mike Kravetz <mike.kravetz@oracle.com> wrote:
> 
> > madvise(MADV_DONTNEED) ends up calling zap_page_range() to clear the page
> > tables associated with the address range.  For hugetlb vmas,
> > zap_page_range will call __unmap_hugepage_range_final.  However,
> > __unmap_hugepage_range_final assumes the passed vma is about to be removed
> > and deletes the vma_lock to prevent pmd sharing as the vma is on the way
> > out.  In the case of madvise(MADV_DONTNEED) the vma remains, but the
> > missing vma_lock prevents pmd sharing and could potentially lead to issues
> > with truncation/fault races.
> > 
> 
> [snip]
> 
> > index 978c17df053e..517c8cc8ccb9 100644
> > --- a/include/linux/mm.h
> > +++ b/include/linux/mm.h
> > @@ -3464,4 +3464,7 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
> >  */
> > #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
> > 
> > +/* Set in unmap_vmas() to indicate an unmap call.  Only used by hugetlb */
> > +#define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
> 
> PeterZ wants to add ZAP_FLAG_FORCE_FLUSH that would be set on
> zap_pte_range(). Not sure you would want to combine them both together, but
> at least be aware of potential conflict.
> 
> https://lore.kernel.org/all/Y1f7YvKuwOl1XEwU@hirez.programming.kicks-ass.net/
> 

Ok, noted.

> [snip]
> 
> > +#ifdef CONFIG_ADVISE_SYSCALLS
> > +/*
> > + * Similar setup as in zap_page_range().  madvise(MADV_DONTNEED) can not call
> > + * zap_page_range for hugetlb vmas as __unmap_hugepage_range_final will delete
> > + * the associated vma_lock.
> > + */
> > +void clear_hugetlb_page_range(struct vm_area_struct *vma, unsigned long start,
> > +				unsigned long end)
> > +{
> > +	struct mmu_notifier_range range;
> > +	struct mmu_gather tlb;
> > +
> > +	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
> > +				start, end);
> > +	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
> > +	tlb_gather_mmu(&tlb, vma->vm_mm);
> > +	update_hiwater_rss(vma->vm_mm);
> > +	mmu_notifier_invalidate_range_start(&range);
> > +
> > +	__unmap_hugepage_range_locking(&tlb, vma, start, end, NULL, 0);
> > +
> > +	mmu_notifier_invalidate_range_end(&range);
> > +	tlb_finish_mmu(&tlb);
> > }
> > +#endif
> 
> I hate ifdef’s. And the second definition of clear_hugetlb_page_range() is
> confusing since it does not have an ifdef at all. How about moving the
> ifdef’s into the function, as is done in io_madvise_prep()? I think it
> is less confusing.

Dang!!!  I sent the wrong version of the patch.  clear_hugetlb_page_range
is not even used here.  I used Peter's idea to eliminate the need for
this separate routine.  And, this was a development version.

Really sorry about that.

Andrew, if you can, please drop this version of the patch.

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3568b90b397d..badcb277603d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -158,6 +158,8 @@  long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *,
 			  zap_flags_t);
+void clear_hugetlb_page_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
@@ -428,6 +430,11 @@  static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 	BUG();
 }
 
+static void __maybe_unused clear_hugetlb_page_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+}
+
 static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
 			unsigned int flags)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 978c17df053e..517c8cc8ccb9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3464,4 +3464,7 @@  madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
  */
 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
 
+/* Set in unmap_vmas() to indicate an unmap call.  Only used by hugetlb */
+#define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
+
 #endif /* _LINUX_MM_H */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4a0289ef09fa..7ba46fa62f75 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5072,7 +5072,6 @@  static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
-	struct mmu_notifier_range range;
 	unsigned long last_addr_mask;
 	bool force_flush = false;
 
@@ -5087,13 +5086,6 @@  static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	tlb_change_page_size(tlb, sz);
 	tlb_start_vma(tlb, vma);
 
-	/*
-	 * If sharing possible, alert mmu notifiers of worst case.
-	 */
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
-				end);
-	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
-	mmu_notifier_invalidate_range_start(&range);
 	last_addr_mask = hugetlb_mask_last_page(h);
 	address = start;
 	for (; address < end; address += sz) {
@@ -5178,7 +5170,6 @@  static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		if (ref_page)
 			break;
 	}
-	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
 
 	/*
@@ -5198,37 +5189,87 @@  static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		tlb_flush_mmu_tlbonly(tlb);
 }
 
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static void __unmap_hugepage_range_locking(struct mmu_gather *tlb,
 			  struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page,
 			  zap_flags_t zap_flags)
 {
+	bool final = zap_flags & ZAP_FLAG_UNMAP;
+
 	hugetlb_vma_lock_write(vma);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 
 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
 
-	/*
-	 * Unlock and free the vma lock before releasing i_mmap_rwsem.  When
-	 * the vma_lock is freed, this makes the vma ineligible for pmd
-	 * sharing.  And, i_mmap_rwsem is required to set up pmd sharing.
-	 * This is important as page tables for this unmapped range will
-	 * be asynchrously deleted.  If the page tables are shared, there
-	 * will be issues when accessed by someone else.
-	 */
-	__hugetlb_vma_unlock_write_free(vma);
+	if (final) {
+		/*
+		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
+		 * When the vma_lock is freed, this makes the vma ineligible
+		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
+		 * pmd sharing.  This is important as page tables for this
+		 * unmapped range will be asynchronously deleted.  If the page
+		 * tables are shared, there will be issues when accessed by
+		 * someone else.
+		 */
+		__hugetlb_vma_unlock_write_free(vma);
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+	} else {
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+		hugetlb_vma_unlock_write(vma);
+	}
+}
 
-	i_mmap_unlock_write(vma->vm_file->f_mapping);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, struct page *ref_page,
+			  zap_flags_t zap_flags)
+{
+	__unmap_hugepage_range_locking(tlb, vma, start, end, ref_page,
+					zap_flags);
+}
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+/*
+ * Similar setup as in zap_page_range().  madvise(MADV_DONTNEED) can not call
+ * zap_page_range for hugetlb vmas as __unmap_hugepage_range_final will delete
+ * the associated vma_lock.
+ */
+void clear_hugetlb_page_range(struct vm_area_struct *vma, unsigned long start,
+				unsigned long end)
+{
+	struct mmu_notifier_range range;
+	struct mmu_gather tlb;
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+				start, end);
+	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
+	tlb_gather_mmu(&tlb, vma->vm_mm);
+	update_hiwater_rss(vma->vm_mm);
+	mmu_notifier_invalidate_range_start(&range);
+
+	__unmap_hugepage_range_locking(&tlb, vma, start, end, NULL, 0);
+
+	mmu_notifier_invalidate_range_end(&range);
+	tlb_finish_mmu(&tlb);
 }
+#endif
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page,
 			  zap_flags_t zap_flags)
 {
+	struct mmu_notifier_range range;
 	struct mmu_gather tlb;
 
+	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+				start, end);
+	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
+
+	mmu_notifier_invalidate_range_start(&range);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
+
+	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index c5599a9279b1..ecd2b4a6cbc3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1671,7 +1671,7 @@  void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 {
 	struct mmu_notifier_range range;
 	struct zap_details details = {
-		.zap_flags = ZAP_FLAG_DROP_MARKER,
+		.zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
 		/* Careful - we need to zap private pages too! */
 		.even_cows = true,
 	};
@@ -1704,15 +1704,21 @@  void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
 
 	lru_add_drain();
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
-				start, start + size);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 	update_hiwater_rss(vma->vm_mm);
-	mmu_notifier_invalidate_range_start(&range);
 	do {
-		unmap_single_vma(&tlb, vma, start, range.end, NULL);
+		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
+				vma->vm_mm,
+				max(start, vma->vm_start),
+				min(start + size, vma->vm_end));
+		if (is_vm_hugetlb_page(vma))
+			adjust_range_if_pmd_sharing_possible(vma,
+				&range.start,
+				&range.end);
+		mmu_notifier_invalidate_range_start(&range);
+		unmap_single_vma(&tlb, vma, start, start + size, NULL);
+		mmu_notifier_invalidate_range_end(&range);
 	} while ((vma = mas_find(&mas, end - 1)) != NULL);
-	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
 }