[v2,04/46] hugetlb: only adjust address ranges when VMAs want PMD sharing

Message ID 20230218002819.1486479-5-jthoughton@google.com (mailing list archive)
State New
Series hugetlb: introduce HugeTLB high-granularity mapping

Commit Message

James Houghton Feb. 18, 2023, 12:27 a.m. UTC
Currently this check is overly aggressive. For some userfaultfd VMAs,
PMD sharing is disabled, yet we still widen the address range used for
flushing TLBs and sending MMU notifiers.

This change is made now because HGM VMAs also have PMD sharing
disabled, yet would still have their flush ranges widened.
Over-aggressively flushing TLBs and triggering MMU notifiers is
particularly harmful with lots of high-granularity operations.

Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: James Houghton <jthoughton@google.com>
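
To make the behavior change concrete, below is a minimal userspace
sketch of the post-patch logic. The struct and helper names are
assumptions modeled on the diff (the kernel's vma-lock check is
omitted), and the PUD geometry assumes x86-64 with 4K pages (1 GiB
PUDs); this is an illustration, not kernel code.

/*
 * Toy model of the post-patch logic: the flush/notifier range is only
 * widened to PUD boundaries when PMD sharing is actually possible.
 */
#include <stdbool.h>
#include <stdio.h>

#define PUD_SIZE		(1UL << 30)	/* assumed: x86-64, 4K pages */
#define PUD_MASK		(~(PUD_SIZE - 1))
#define PUD_ALIGN_UP(x)		(((x) + PUD_SIZE - 1) & PUD_MASK)
#define PUD_ALIGN_DOWN(x)	((x) & PUD_MASK)

struct toy_vma {
	unsigned long vm_start, vm_end;
	bool may_share;		/* stands in for VM_MAYSHARE */
	bool uffd_no_share;	/* stands in for uffd_disable_huge_pmd_share() */
};

/* Models pmd_sharing_possible(); the kernel's vma-lock check is omitted. */
static bool pmd_sharing_possible(const struct toy_vma *vma)
{
	if (vma->uffd_no_share)
		return false;
	return vma->may_share;
}

/* Models adjust_range_if_pmd_sharing_possible() after the patch. */
static void adjust_range_if_pmd_sharing_possible(const struct toy_vma *vma,
						 unsigned long *start,
						 unsigned long *end)
{
	unsigned long v_start = PUD_ALIGN_UP(vma->vm_start);
	unsigned long v_end = PUD_ALIGN_DOWN(vma->vm_end);

	/* VMA must span a full aligned PUD, and the range must overlap it. */
	if (!pmd_sharing_possible(vma) || v_end <= v_start ||
	    *end <= v_start || *start >= v_end)
		return;

	/* Extend the range to PUD boundaries for the worst case. */
	if (*start > v_start)
		*start = PUD_ALIGN_DOWN(*start);
	if (*end < v_end)
		*end = PUD_ALIGN_UP(*end);
}

int main(void)
{
	struct toy_vma vma = {
		.vm_start = 2 * PUD_SIZE, .vm_end = 5 * PUD_SIZE,
		.may_share = true,
		.uffd_no_share = true,	/* sharing disabled via userfaultfd */
	};
	unsigned long start = vma.vm_start + (1UL << 21);	/* a 2M page */
	unsigned long end = start + (1UL << 21);

	adjust_range_if_pmd_sharing_possible(&vma, &start, &end);
	/* Prints the original 2M range: with sharing disabled, nothing is
	 * widened. Set .uffd_no_share = false and the range grows to the
	 * surrounding 1 GiB PUD, the pre-patch behavior for these VMAs. */
	printf("flush range: [%#lx, %#lx)\n", start, end);
	return 0;
}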

Comments

Mina Almasry Feb. 18, 2023, 1:10 a.m. UTC | #1
On Fri, Feb 17, 2023 at 4:28 PM James Houghton <jthoughton@google.com> wrote:
>
> Currently this check is overly aggressive. For some userfaultfd VMAs,
> PMD sharing is disabled, yet we still widen the address range used for
> flushing TLBs and sending MMU notifiers.
>
> This change is made now because HGM VMAs also have PMD sharing
> disabled, yet would still have their flush ranges widened.
> Over-aggressively flushing TLBs and triggering MMU notifiers is
> particularly harmful with lots of high-granularity operations.
>
> Acked-by: Peter Xu <peterx@redhat.com>
> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
> Signed-off-by: James Houghton <jthoughton@google.com>

Acked-by: Mina Almasry <almasrymina@google.com>

>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 540cdf9570d3..08004371cfed 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6999,22 +6999,31 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
>         return saddr;
>  }
>
> -bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
> +static bool pmd_sharing_possible(struct vm_area_struct *vma)
>  {
> -       unsigned long start = addr & PUD_MASK;
> -       unsigned long end = start + PUD_SIZE;
> -
>  #ifdef CONFIG_USERFAULTFD
>         if (uffd_disable_huge_pmd_share(vma))
>                 return false;
>  #endif
>         /*
> -        * check on proper vm_flags and page table alignment
> +        * Only shared VMAs can share PMDs.
>          */
>         if (!(vma->vm_flags & VM_MAYSHARE))
>                 return false;
>         if (!vma->vm_private_data)      /* vma lock required for sharing */
>                 return false;
> +       return true;
> +}
> +
> +bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
> +{
> +       unsigned long start = addr & PUD_MASK;
> +       unsigned long end = start + PUD_SIZE;
> +       /*
> +        * check on proper vm_flags and page table alignment
> +        */
> +       if (!pmd_sharing_possible(vma))
> +               return false;
>         if (!range_in_vma(vma, start, end))
>                 return false;
>         return true;
> @@ -7035,7 +7044,7 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
>          * vma needs to span at least one aligned PUD size, and the range
>          * must be at least partially within in.
>          */
> -       if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
> +       if (!pmd_sharing_possible(vma) || !(v_end > v_start) ||
>                 (*end <= v_start) || (*start >= v_end))
>                 return;
>
> --
> 2.39.2.637.g21b0678d19-goog
>

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 540cdf9570d3..08004371cfed 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6999,22 +6999,31 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	return saddr;
 }
 
-bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
+static bool pmd_sharing_possible(struct vm_area_struct *vma)
 {
-	unsigned long start = addr & PUD_MASK;
-	unsigned long end = start + PUD_SIZE;
-
 #ifdef CONFIG_USERFAULTFD
 	if (uffd_disable_huge_pmd_share(vma))
 		return false;
 #endif
 	/*
-	 * check on proper vm_flags and page table alignment
+	 * Only shared VMAs can share PMDs.
 	 */
 	if (!(vma->vm_flags & VM_MAYSHARE))
 		return false;
 	if (!vma->vm_private_data)	/* vma lock required for sharing */
 		return false;
+	return true;
+}
+
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long start = addr & PUD_MASK;
+	unsigned long end = start + PUD_SIZE;
+	/*
+	 * check on proper vm_flags and page table alignment
+	 */
+	if (!pmd_sharing_possible(vma))
+		return false;
 	if (!range_in_vma(vma, start, end))
 		return false;
 	return true;
@@ -7035,7 +7044,7 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 	 * vma needs to span at least one aligned PUD size, and the range
 	 * must be at least partially within in.
 	 */
-	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
+	if (!pmd_sharing_possible(vma) || !(v_end > v_start) ||
 		(*end <= v_start) || (*start >= v_end))
 		return;
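
For completeness, the other caller of pmd_sharing_possible() in the
patch, want_pmd_share(), additionally requires that the PUD-aligned
region around addr fits entirely inside the VMA. A self-contained
sketch of that range check (assumed toy names, flattened signatures,
and the same 1 GiB PUD assumption as in the sketch above):

#include <stdbool.h>

#define PUD_SIZE	(1UL << 30)	/* assumed: x86-64, 4K pages */
#define PUD_MASK	(~(PUD_SIZE - 1))

/* Toy stand-in for range_in_vma(): is [start, end) inside the VMA? */
static bool range_in_vma(unsigned long vm_start, unsigned long vm_end,
			 unsigned long start, unsigned long end)
{
	return start >= vm_start && end <= vm_end;
}

/* Models the alignment half of want_pmd_share(): sharing at addr needs
 * the whole PUD_SIZE-aligned region containing addr to fit in the VMA. */
static bool pud_range_fits(unsigned long vm_start, unsigned long vm_end,
			   unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

	return range_in_vma(vm_start, vm_end, start, end);
}

In these toy terms, the refactored want_pmd_share() reduces to
pmd_sharing_possible(vma) && pud_range_fits(...), which is why the
patch can reuse the first half in adjust_range_if_pmd_sharing_possible()
while leaving the alignment check behind.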