Message ID | 20240215103205.2607016-15-ryan.roberts@arm.com
---|---
State | New, archived
Series | Transparent Contiguous PTEs for User Mappings
On Thu, Feb 15, 2024 at 10:32:01AM +0000, Ryan Roberts wrote:
> Optimize the contpte implementation to fix some of the
> exit/munmap/dontneed performance regression introduced by the initial
> contpte commit. Subsequent patches will solve it entirely.
>
> During exit(), munmap() or madvise(MADV_DONTNEED), mappings must be
> cleared. Previously this was done 1 PTE at a time. But the core-mm
> supports batched clear via the new [get_and_]clear_full_ptes() APIs. So
> let's implement those APIs and for fully covered contpte mappings, we no
> longer need to unfold the contpte. This significantly reduces unfolding
> operations, reducing the number of tlbis that must be issued.
>
> Tested-by: John Hubbard <jhubbard@nvidia.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> ---
>  arch/arm64/include/asm/pgtable.h | 67 ++++++++++++++++++++++++++++++++
>  arch/arm64/mm/contpte.c          | 17 ++++++++
>  2 files changed, 84 insertions(+)
On Thu, Feb 15, 2024 at 10:32:01AM +0000, Ryan Roberts wrote:
> Optimize the contpte implementation to fix some of the
> exit/munmap/dontneed performance regression introduced by the initial
> contpte commit. Subsequent patches will solve it entirely.
>
> During exit(), munmap() or madvise(MADV_DONTNEED), mappings must be
> cleared. Previously this was done 1 PTE at a time. But the core-mm
> supports batched clear via the new [get_and_]clear_full_ptes() APIs. So
> let's implement those APIs and for fully covered contpte mappings, we no
> longer need to unfold the contpte. This significantly reduces unfolding
> operations, reducing the number of tlbis that must be issued.
>
> Tested-by: John Hubbard <jhubbard@nvidia.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
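The key semantic of the batched `__get_and_clear_full_ptes()` helper in the patch below is that the dirty and young bits from every pte in the batch are folded into the single returned pte. The following is a minimal userspace model of that accumulation logic only; the `pte_t` struct and helper names here are illustrative stand-ins, not the real arm64 definitions:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy pte: just the bits that matter for this demonstration. */
typedef struct { bool valid, dirty, young; } pte_t;

/* Stand-in for __ptep_get_and_clear(): return the old entry, zero the slot. */
static pte_t ptep_get_and_clear(pte_t *ptep)
{
        pte_t old = *ptep;
        *ptep = (pte_t){ 0 };
        return old;
}

/* Mirrors the batched helper: clear nr entries, OR together dirty/young. */
static pte_t get_and_clear_full_ptes(pte_t *ptep, unsigned int nr)
{
        pte_t pte = ptep_get_and_clear(ptep);

        while (--nr) {
                pte_t tmp = ptep_get_and_clear(++ptep);

                if (tmp.dirty)
                        pte.dirty = true;
                if (tmp.young)
                        pte.young = true;
        }
        return pte;
}

int main(void)
{
        /* Four "contiguous" entries; only entry 2 is dirty, entry 3 young. */
        pte_t ptes[4] = {
                { .valid = true },
                { .valid = true },
                { .valid = true, .dirty = true },
                { .valid = true, .young = true },
        };
        pte_t pte = get_and_clear_full_ptes(ptes, 4);

        printf("dirty=%d young=%d\n", pte.dirty, pte.young); /* dirty=1 young=1 */
        return 0;
}
```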
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 8643227c318b..a8f1a35e3086 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -965,6 +965,37 @@ static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }
 
+static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full)
+{
+	for (;;) {
+		__ptep_get_and_clear(mm, addr, ptep);
+		if (--nr == 0)
+			break;
+		ptep++;
+		addr += PAGE_SIZE;
+	}
+}
+
+static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full)
+{
+	pte_t pte, tmp_pte;
+
+	pte = __ptep_get_and_clear(mm, addr, ptep);
+	while (--nr) {
+		ptep++;
+		addr += PAGE_SIZE;
+		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+		if (pte_dirty(tmp_pte))
+			pte = pte_mkdirty(pte);
+		if (pte_young(tmp_pte))
+			pte = pte_mkyoung(pte);
+	}
+	return pte;
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
@@ -1160,6 +1191,11 @@ extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
 extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
 extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, unsigned int nr);
+extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full);
+extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full);
 extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep);
 extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
@@ -1253,6 +1289,35 @@ static inline void pte_clear(struct mm_struct *mm,
 	__pte_clear(mm, addr, ptep);
 }
 
+#define clear_full_ptes clear_full_ptes
+static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full)
+{
+	if (likely(nr == 1)) {
+		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+		__clear_full_ptes(mm, addr, ptep, nr, full);
+	} else {
+		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
+	}
+}
+
+#define get_and_clear_full_ptes get_and_clear_full_ptes
+static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full)
+{
+	pte_t pte;
+
+	if (likely(nr == 1)) {
+		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+	} else {
+		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+	}
+
+	return pte;
+}
+
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				unsigned long addr, pte_t *ptep)
@@ -1337,6 +1402,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 #define set_pte					__set_pte
 #define set_ptes				__set_ptes
 #define pte_clear				__pte_clear
+#define clear_full_ptes				__clear_full_ptes
+#define get_and_clear_full_ptes			__get_and_clear_full_ptes
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear			__ptep_get_and_clear
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index bedb58524535..50e0173dc5ee 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -212,6 +212,23 @@ void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL(contpte_set_ptes);
 
+void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full)
+{
+	contpte_try_unfold_partial(mm, addr, ptep, nr);
+	__clear_full_ptes(mm, addr, ptep, nr, full);
+}
+EXPORT_SYMBOL(contpte_clear_full_ptes);
+
+pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full)
+{
+	contpte_try_unfold_partial(mm, addr, ptep, nr);
+	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+}
+EXPORT_SYMBOL(contpte_get_and_clear_full_ptes);
+
 int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep)
 {
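For context on how these entry points are meant to be consumed: a caller that already knows `nr` contiguous ptes map a single folio can zap them in one call and update folio state once. The sketch below is a hypothetical caller, not code from this series; `example_zap_folio_ptes()` and its bookkeeping are illustrative assumptions layered on the APIs the patch defines:

```c
#include <linux/mm.h>
#include <linux/swap.h>

/*
 * Hypothetical caller sketch -- not part of this series.  Assumes the
 * caller has already determined that @nr contiguous ptes starting at
 * @ptep all map @folio (e.g. via a folio_pte_batch()-style walk).
 */
static void example_zap_folio_ptes(struct vm_area_struct *vma,
				   struct folio *folio, unsigned long addr,
				   pte_t *ptep, unsigned int nr)
{
	/* One call clears the whole batch; a fully covered contpte
	 * block takes the non-unfolding path added by this patch. */
	pte_t pte = get_and_clear_full_ptes(vma->vm_mm, addr, ptep, nr, 1);

	/* dirty/young were accumulated across the batch, so the folio
	 * bookkeeping happens once rather than once per pte. */
	if (pte_dirty(pte))
		folio_mark_dirty(folio);
	if (pte_young(pte))
		folio_mark_accessed(folio);
}
```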