
[v5,21/25] arm64/mm: Implement new [get_and_]clear_full_ptes() batch APIs

Message ID 20240202080756.1453939-22-ryan.roberts@arm.com (mailing list archive)
State New, archived
Series Transparent Contiguous PTEs for User Mappings

Commit Message

Ryan Roberts Feb. 2, 2024, 8:07 a.m. UTC
Optimize the contpte implementation to fix some of the
exit/munmap/dontneed performance regression introduced by the initial
contpte commit. Subsequent patches will solve it entirely.

During exit(), munmap() or madvise(MADV_DONTNEED), mappings must be
cleared. Previously this was done 1 PTE at a time. But the core-mm
supports batched clear via the new [get_and_]clear_full_ptes() APIs. So
let's implement those APIs and for fully covered contpte mappings, we no
longer need to unfold the contpte. This significantly reduces unfolding
operations, reducing the number of tlbis that must be issued.
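
For illustration (a hypothetical caller, not part of this patch), a tear-down
path that knows it covers a whole contpte block can now clear all CONT_PTES
entries with a single call, rather than CONT_PTES individual clears plus an
unfold:

	/*
	 * Hypothetical sketch only: clear one fully covered contpte block.
	 * 'full' follows the usual ptep_get_and_clear_full() convention
	 * (non-zero when the whole address space is being torn down, e.g.
	 * at exit()), and no unfold (hence no extra TLBIs) is required.
	 */
	static void zap_one_contpte_block(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep,
					  int full)
	{
		clear_full_ptes(mm, addr, ptep, CONT_PTES, full);
	}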

Tested-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 arch/arm64/include/asm/pgtable.h | 67 ++++++++++++++++++++++++++++++++
 arch/arm64/mm/contpte.c          | 17 ++++++++
 2 files changed, 84 insertions(+)

Comments

Mark Rutland Feb. 13, 2024, 4:43 p.m. UTC | #1
On Fri, Feb 02, 2024 at 08:07:52AM +0000, Ryan Roberts wrote:
> Optimize the contpte implementation to fix some of the
> exit/munmap/dontneed performance regression introduced by the initial
> contpte commit. Subsequent patches will solve it entirely.
> 
> During exit(), munmap() or madvise(MADV_DONTNEED), mappings must be
> cleared. Previously this was done 1 PTE at a time. But the core-mm
> supports batched clear via the new [get_and_]clear_full_ptes() APIs. So
> let's implement those APIs and for fully covered contpte mappings, we no
> longer need to unfold the contpte. This significantly reduces unfolding
> operations, reducing the number of tlbis that must be issued.
> 
> Tested-by: John Hubbard <jhubbard@nvidia.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>  arch/arm64/include/asm/pgtable.h | 67 ++++++++++++++++++++++++++++++++
>  arch/arm64/mm/contpte.c          | 17 ++++++++
>  2 files changed, 84 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index c07f0d563733..ad04adb7b87f 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -965,6 +965,37 @@ static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
>  	return pte;
>  }
>  
> +static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
> +				pte_t *ptep, unsigned int nr, int full)
> +{
> +	for (;;) {
> +		__ptep_get_and_clear(mm, addr, ptep);
> +		if (--nr == 0)
> +			break;
> +		ptep++;
> +		addr += PAGE_SIZE;
> +	}
> +}

The loop construct is a bit odd; can't this be:

	while (nr--) {
		__ptep_get_and_clear(mm, addr, ptep);
		ptep++;
		addr += PAGE_SIZE;
	}

... or:

	do {
		__ptep_get_and_clear(mm, addr, ptep);
		ptep++;
		addr += PAGE_SIZE;
	} while (--nr);

... ?

Otherwise, this looks good to me.

Mark.

> +
> +static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
> +				unsigned long addr, pte_t *ptep,
> +				unsigned int nr, int full)
> +{
> +	pte_t pte, tmp_pte;
> +
> +	pte = __ptep_get_and_clear(mm, addr, ptep);
> +	while (--nr) {
> +		ptep++;
> +		addr += PAGE_SIZE;
> +		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
> +		if (pte_dirty(tmp_pte))
> +			pte = pte_mkdirty(pte);
> +		if (pte_young(tmp_pte))
> +			pte = pte_mkyoung(pte);
> +	}
> +	return pte;
> +}
> +
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
>  static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
> @@ -1167,6 +1198,11 @@ extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
>  extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
>  extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
>  				pte_t *ptep, pte_t pte, unsigned int nr);
> +extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
> +				pte_t *ptep, unsigned int nr, int full);
> +extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
> +				unsigned long addr, pte_t *ptep,
> +				unsigned int nr, int full);
>  extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
>  				unsigned long addr, pte_t *ptep);
>  extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
> @@ -1254,6 +1290,35 @@ static inline void pte_clear(struct mm_struct *mm,
>  	__pte_clear(mm, addr, ptep);
>  }
>  
> +#define clear_full_ptes clear_full_ptes
> +static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
> +				pte_t *ptep, unsigned int nr, int full)
> +{
> +	if (likely(nr == 1)) {
> +		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
> +		__clear_full_ptes(mm, addr, ptep, nr, full);
> +	} else {
> +		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
> +	}
> +}
> +
> +#define get_and_clear_full_ptes get_and_clear_full_ptes
> +static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
> +				unsigned long addr, pte_t *ptep,
> +				unsigned int nr, int full)
> +{
> +	pte_t pte;
> +
> +	if (likely(nr == 1)) {
> +		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
> +		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
> +	} else {
> +		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
> +	}
> +
> +	return pte;
> +}
> +
>  #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
>  static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
>  				unsigned long addr, pte_t *ptep)
> @@ -1338,6 +1403,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
>  #define set_pte					__set_pte
>  #define set_ptes				__set_ptes
>  #define pte_clear				__pte_clear
> +#define clear_full_ptes				__clear_full_ptes
> +#define get_and_clear_full_ptes			__get_and_clear_full_ptes
>  #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
>  #define ptep_get_and_clear			__ptep_get_and_clear
>  #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
> diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
> index c85e64baf03b..80346108450b 100644
> --- a/arch/arm64/mm/contpte.c
> +++ b/arch/arm64/mm/contpte.c
> @@ -207,6 +207,23 @@ void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
>  }
>  EXPORT_SYMBOL(contpte_set_ptes);
>  
> +void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
> +				pte_t *ptep, unsigned int nr, int full)
> +{
> +	contpte_try_unfold_partial(mm, addr, ptep, nr);
> +	__clear_full_ptes(mm, addr, ptep, nr, full);
> +}
> +EXPORT_SYMBOL(contpte_clear_full_ptes);
> +
> +pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
> +				unsigned long addr, pte_t *ptep,
> +				unsigned int nr, int full)
> +{
> +	contpte_try_unfold_partial(mm, addr, ptep, nr);
> +	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
> +}
> +EXPORT_SYMBOL(contpte_get_and_clear_full_ptes);
> +
>  int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
>  					unsigned long addr, pte_t *ptep)
>  {
> -- 
> 2.25.1
>
Ryan Roberts Feb. 13, 2024, 4:48 p.m. UTC | #2
On 13/02/2024 16:43, Mark Rutland wrote:
> On Fri, Feb 02, 2024 at 08:07:52AM +0000, Ryan Roberts wrote:
>> Optimize the contpte implementation to fix some of the
>> exit/munmap/dontneed performance regression introduced by the initial
>> contpte commit. Subsequent patches will solve it entirely.
>>
>> During exit(), munmap() or madvise(MADV_DONTNEED), mappings must be
>> cleared. Previously this was done 1 PTE at a time. But the core-mm
>> supports batched clear via the new [get_and_]clear_full_ptes() APIs. So
>> let's implement those APIs and for fully covered contpte mappings, we no
>> longer need to unfold the contpte. This significantly reduces unfolding
>> operations, reducing the number of tlbis that must be issued.
>>
>> Tested-by: John Hubbard <jhubbard@nvidia.com>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> ---
>>  arch/arm64/include/asm/pgtable.h | 67 ++++++++++++++++++++++++++++++++
>>  arch/arm64/mm/contpte.c          | 17 ++++++++
>>  2 files changed, 84 insertions(+)
>>
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index c07f0d563733..ad04adb7b87f 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -965,6 +965,37 @@ static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
>>  	return pte;
>>  }
>>  
>> +static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
>> +				pte_t *ptep, unsigned int nr, int full)
>> +{
>> +	for (;;) {
>> +		__ptep_get_and_clear(mm, addr, ptep);
>> +		if (--nr == 0)
>> +			break;
>> +		ptep++;
>> +		addr += PAGE_SIZE;
>> +	}
>> +}
> 
> The loop construct is a bit odd; can't this be:

I found it a little odd at first, but it's avoiding the ptep and addr increments
the last time through the loop. It's the preferred pattern for these functions in
core-mm. See the default set_ptes(), wrprotect_ptes() and clear_full_ptes() in
include/linux/pgtable.h.

So I'd prefer to leave it as is so that we match them. What do you think?
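
For reference, the generic clear_full_ptes() fallback in include/linux/pgtable.h
has roughly this shape (paraphrased from memory, not a verbatim copy); it skips
the pointless ptep/addr increments on the final iteration:

	static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, unsigned int nr, int full)
	{
		for (;;) {
			ptep_get_and_clear_full(mm, addr, ptep, full);
			if (--nr == 0)
				break;
			ptep++;
			addr += PAGE_SIZE;
		}
	}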

> 
> 	while (nr--) {
> 		__ptep_get_and_clear(mm, addr, ptep);
> 		ptep++;
> 		addr += PAGE_SIZE;
> 	}
> 
> ... or:
> 
> 	do {
> 		__ptep_get_and_clear(mm, addr, ptep);
> 		ptep++;
> 		addr += PAGE_SIZE;
> 	} while (--nr);
> 
> ... ?
> 
> Otherwise, this looks good to me.
> 
> Mark.
> 
>> +
>> +static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
>> +				unsigned long addr, pte_t *ptep,
>> +				unsigned int nr, int full)
>> +{
>> +	pte_t pte, tmp_pte;
>> +
>> +	pte = __ptep_get_and_clear(mm, addr, ptep);
>> +	while (--nr) {
>> +		ptep++;
>> +		addr += PAGE_SIZE;
>> +		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
>> +		if (pte_dirty(tmp_pte))
>> +			pte = pte_mkdirty(pte);
>> +		if (pte_young(tmp_pte))
>> +			pte = pte_mkyoung(pte);
>> +	}
>> +	return pte;
>> +}
>> +
>>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>>  #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
>>  static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
>> @@ -1167,6 +1198,11 @@ extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
>>  extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
>>  extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
>>  				pte_t *ptep, pte_t pte, unsigned int nr);
>> +extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
>> +				pte_t *ptep, unsigned int nr, int full);
>> +extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
>> +				unsigned long addr, pte_t *ptep,
>> +				unsigned int nr, int full);
>>  extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
>>  				unsigned long addr, pte_t *ptep);
>>  extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
>> @@ -1254,6 +1290,35 @@ static inline void pte_clear(struct mm_struct *mm,
>>  	__pte_clear(mm, addr, ptep);
>>  }
>>  
>> +#define clear_full_ptes clear_full_ptes
>> +static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
>> +				pte_t *ptep, unsigned int nr, int full)
>> +{
>> +	if (likely(nr == 1)) {
>> +		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
>> +		__clear_full_ptes(mm, addr, ptep, nr, full);
>> +	} else {
>> +		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
>> +	}
>> +}
>> +
>> +#define get_and_clear_full_ptes get_and_clear_full_ptes
>> +static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
>> +				unsigned long addr, pte_t *ptep,
>> +				unsigned int nr, int full)
>> +{
>> +	pte_t pte;
>> +
>> +	if (likely(nr == 1)) {
>> +		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
>> +		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
>> +	} else {
>> +		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
>> +	}
>> +
>> +	return pte;
>> +}
>> +
>>  #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
>>  static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
>>  				unsigned long addr, pte_t *ptep)
>> @@ -1338,6 +1403,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
>>  #define set_pte					__set_pte
>>  #define set_ptes				__set_ptes
>>  #define pte_clear				__pte_clear
>> +#define clear_full_ptes				__clear_full_ptes
>> +#define get_and_clear_full_ptes			__get_and_clear_full_ptes
>>  #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
>>  #define ptep_get_and_clear			__ptep_get_and_clear
>>  #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
>> diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
>> index c85e64baf03b..80346108450b 100644
>> --- a/arch/arm64/mm/contpte.c
>> +++ b/arch/arm64/mm/contpte.c
>> @@ -207,6 +207,23 @@ void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
>>  }
>>  EXPORT_SYMBOL(contpte_set_ptes);
>>  
>> +void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
>> +				pte_t *ptep, unsigned int nr, int full)
>> +{
>> +	contpte_try_unfold_partial(mm, addr, ptep, nr);
>> +	__clear_full_ptes(mm, addr, ptep, nr, full);
>> +}
>> +EXPORT_SYMBOL(contpte_clear_full_ptes);
>> +
>> +pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
>> +				unsigned long addr, pte_t *ptep,
>> +				unsigned int nr, int full)
>> +{
>> +	contpte_try_unfold_partial(mm, addr, ptep, nr);
>> +	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
>> +}
>> +EXPORT_SYMBOL(contpte_get_and_clear_full_ptes);
>> +
>>  int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
>>  					unsigned long addr, pte_t *ptep)
>>  {
>> -- 
>> 2.25.1
>>
Mark Rutland Feb. 13, 2024, 4:53 p.m. UTC | #3
On Tue, Feb 13, 2024 at 04:48:50PM +0000, Ryan Roberts wrote:
> On 13/02/2024 16:43, Mark Rutland wrote:
> > On Fri, Feb 02, 2024 at 08:07:52AM +0000, Ryan Roberts wrote:

> >> +static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
> >> +				pte_t *ptep, unsigned int nr, int full)
> >> +{
> >> +	for (;;) {
> >> +		__ptep_get_and_clear(mm, addr, ptep);
> >> +		if (--nr == 0)
> >> +			break;
> >> +		ptep++;
> >> +		addr += PAGE_SIZE;
> >> +	}
> >> +}
> > 
> > The loop construct is a bit odd; can't this be:
> 
> I found it a little odd at first, but it's avoiding the ptep and addr increments
> the last time through the loop. It's the preferred pattern for these functions in
> core-mm. See the default set_ptes(), wrprotect_ptes() and clear_full_ptes() in
> include/linux/pgtable.h.
> 
> So I'd prefer to leave it as is so that we match them. What do you think?

That's fair enough; I'm happy with it as-is.

Mark.

Patch

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c07f0d563733..ad04adb7b87f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -965,6 +965,37 @@  static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
 	return pte;
 }
 
+static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full)
+{
+	for (;;) {
+		__ptep_get_and_clear(mm, addr, ptep);
+		if (--nr == 0)
+			break;
+		ptep++;
+		addr += PAGE_SIZE;
+	}
+}
+
+static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full)
+{
+	pte_t pte, tmp_pte;
+
+	pte = __ptep_get_and_clear(mm, addr, ptep);
+	while (--nr) {
+		ptep++;
+		addr += PAGE_SIZE;
+		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+		if (pte_dirty(tmp_pte))
+			pte = pte_mkdirty(pte);
+		if (pte_young(tmp_pte))
+			pte = pte_mkyoung(pte);
+	}
+	return pte;
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
@@ -1167,6 +1198,11 @@  extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
 extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
 extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte, unsigned int nr);
+extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full);
+extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full);
 extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
 				unsigned long addr, pte_t *ptep);
 extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
@@ -1254,6 +1290,35 @@  static inline void pte_clear(struct mm_struct *mm,
 	__pte_clear(mm, addr, ptep);
 }
 
+#define clear_full_ptes clear_full_ptes
+static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full)
+{
+	if (likely(nr == 1)) {
+		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+		__clear_full_ptes(mm, addr, ptep, nr, full);
+	} else {
+		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
+	}
+}
+
+#define get_and_clear_full_ptes get_and_clear_full_ptes
+static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full)
+{
+	pte_t pte;
+
+	if (likely(nr == 1)) {
+		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
+		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+	} else {
+		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+	}
+
+	return pte;
+}
+
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 				unsigned long addr, pte_t *ptep)
@@ -1338,6 +1403,8 @@  static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 #define set_pte					__set_pte
 #define set_ptes				__set_ptes
 #define pte_clear				__pte_clear
+#define clear_full_ptes				__clear_full_ptes
+#define get_and_clear_full_ptes			__get_and_clear_full_ptes
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear			__ptep_get_and_clear
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c
index c85e64baf03b..80346108450b 100644
--- a/arch/arm64/mm/contpte.c
+++ b/arch/arm64/mm/contpte.c
@@ -207,6 +207,23 @@  void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL(contpte_set_ptes);
 
+void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, unsigned int nr, int full)
+{
+	contpte_try_unfold_partial(mm, addr, ptep, nr);
+	__clear_full_ptes(mm, addr, ptep, nr, full);
+}
+EXPORT_SYMBOL(contpte_clear_full_ptes);
+
+pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep,
+				unsigned int nr, int full)
+{
+	contpte_try_unfold_partial(mm, addr, ptep, nr);
+	return __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
+}
+EXPORT_SYMBOL(contpte_get_and_clear_full_ptes);
+
 int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
 					unsigned long addr, pte_t *ptep)
 {