[01/31] mm: use pmdp_get_lockless() without surplus barrier()

Message ID: 34467cca-58b6-3e64-1ee7-e3dc43257a@google.com (mailing list archive)
State: New
Series: mm: allow pte_offset_map[_lock]() to fail

Commit Message

Hugh Dickins May 22, 2023, 4:49 a.m. UTC
Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more
reliable result with PAE (or READ_ONCE as before without PAE); and remove
the unnecessary extra barrier()s which got left behind in its callers.

HOWEVER: Note the small print in linux/pgtable.h, where it was designed
specifically for fast GUP, and depends on interrupts being disabled for
its full guarantee: most callers which have been added (here and before)
do NOT have interrupts disabled, so there is still some need for caution.

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 fs/userfaultfd.c        | 10 +---------
 include/linux/pgtable.h | 17 -----------------
 mm/gup.c                |  6 +-----
 mm/hmm.c                |  2 +-
 mm/khugepaged.c         |  5 -----
 mm/ksm.c                |  3 +--
 mm/memory.c             | 14 ++------------
 mm/mprotect.c           |  5 -----
 mm/page_vma_mapped.c    |  2 +-
 9 files changed, 7 insertions(+), 57 deletions(-)
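
For readers new to the PAE detail: on 32-bit x86 with PAE a page-table
entry is 64 bits wide and cannot be loaded in a single instruction, so
pmdp_get_lockless() reads the two 32-bit halves and retries if the low
half changed in between (see the GUP_GET_PTE_LOW_HIGH code in
linux/pgtable.h).  A minimal userspace model of the idea - entry64_t
and read_entry_lockless() are invented names for illustration, not
kernel API:

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t low;	/* flag bits and low pfn bits */
	_Atomic uint32_t high;	/* high pfn bits */
} entry64_t;

static uint64_t read_entry_lockless(entry64_t *e)
{
	uint32_t lo, hi;

	do {
		lo = atomic_load_explicit(&e->low, memory_order_acquire);
		hi = atomic_load_explicit(&e->high, memory_order_acquire);
		/*
		 * If the low word changed while the high word was being
		 * read, the two halves may belong to different entries:
		 * retry.  (The kernel version uses plain loads with
		 * smp_rmb() in between.)
		 */
	} while (lo != atomic_load_explicit(&e->low, memory_order_relaxed));

	return ((uint64_t)hi << 32) | lo;
}

The retry only catches a torn read when the low word visibly changes;
as the HOWEVER above says, the full guarantee - that a present entry is
not replaced wholesale by a different present entry between the two
loads - additionally requires interrupts to be disabled, which blocks
the IPI-based TLB flush any such replacement must wait for.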

Comments

Peter Xu May 24, 2023, 10:29 p.m. UTC | #1
On Sun, May 21, 2023 at 09:49:45PM -0700, Hugh Dickins wrote:
> Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more
> reliable result with PAE (or READ_ONCE as before without PAE); and remove
> the unnecessary extra barrier()s which got left behind in its callers.

Pure question: does it mean that some of the paths below (the ones
missing barrier()) could have a problem with CONFIG_PAE, hence this can
be seen as a (potential) bug fix?

Thanks,

> [remainder of quoted commit message and diff snipped; the full patch
> appears below]
Yu Zhao May 24, 2023, 10:54 p.m. UTC | #2
On Sun, May 21, 2023 at 10:49 PM Hugh Dickins <hughd@google.com> wrote:
>
> Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more
> reliable result with PAE (or READ_ONCE as before without PAE); and remove
> the unnecessary extra barrier()s which got left behind in its callers.
>
> HOWEVER: Note the small print in linux/pgtable.h, where it was designed
> specifically for fast GUP, and depends on interrupts being disabled for
> its full guarantee: most callers which have been added (here and before)
> do NOT have interrupts disabled, so there is still some need for caution.
>
> Signed-off-by: Hugh Dickins <hughd@google.com>

Acked-by: Yu Zhao <yuzhao@google.com>

The previous ask here:
https://lore.kernel.org/r/CAOUHufZo=fB2HcaCrj2aidLJ2zEhOpi7ou5M_7qOQiuQq8+wTQ@mail.gmail.com/
Hugh Dickins May 25, 2023, 10:35 p.m. UTC | #3
On Wed, 24 May 2023, Peter Xu wrote:
> On Sun, May 21, 2023 at 09:49:45PM -0700, Hugh Dickins wrote:
> > Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more
> > reliable result with PAE (or READ_ONCE as before without PAE); and remove
> > the unnecessary extra barrier()s which got left behind in its callers.
> 
> > Pure question: does it mean that some of the paths below (the ones
> > missing barrier()) could have a problem with CONFIG_PAE, hence this can
> > be seen as a (potential) bug fix?

I don't think so; or at least, I am not claiming that this fixes any.

It really depends on what use is made of the pmdval afterwards, and
I've not checked through them.  The READ_ONCE()s which were there,
were good enough to make sure that the compiler did not reevaluate
the pmdval later on, with perhaps a confusingly different result.
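
(As an illustration of the re-evaluation hazard - sketch code, not any
particular caller, and do_something() is a stand-in name:

	pmd_t pmdval = *pmd;	/* compiler may reload *pmd at each use */

	if (pmd_none(pmdval))	/* this test may see one value...       */
		return;
	do_something(pmdval);	/* ...and a reload here may see another */

With a plain dereference the compiler is free to re-read *pmd rather
than keep pmdval in a register, since it assumes the memory does not
change underneath it; READ_ONCE(*pmd) forces a single load, so every
later test acts on the same snapshot.)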

But, at least in the x86 PAE case, they were not good enough to ensure
that the two halves of the entry match up; and, sad to say, nor is that
absolutely guaranteed by these conversions to pmdp_get_lockless() -
because of the "HOWEVER" below.  PeterZ's comments in linux/pgtable.h
are well worth reading through.
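
(Also illustrative: on 32-bit PAE a READ_ONCE() of a 64-bit entry cannot
be a single load, so it degenerates to roughly

	lo = READ_ONCE(low_half(*pmdp));	/* may see the new low word  */
	hi = READ_ONCE(high_half(*pmdp));	/* may see the old high word */

where low_half()/high_half() are stand-ins for the two 32-bit accesses:
nothing detects an update landing between the two loads, so the combined
value can be a pmd that never existed.)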

You might question why I made these changes at all: some days
I question them too.  Better though imperfect?  Or deceptive?

Hugh

> > 
> > HOWEVER: Note the small print in linux/pgtable.h, where it was designed
> > specifically for fast GUP, and depends on interrupts being disabled for
> > its full guarantee: most callers which have been added (here and before)
> > do NOT have interrupts disabled, so there is still some need for caution.
Peter Xu May 26, 2023, 4:48 p.m. UTC | #4
On Thu, May 25, 2023 at 03:35:01PM -0700, Hugh Dickins wrote:
> On Wed, 24 May 2023, Peter Xu wrote:
> > On Sun, May 21, 2023 at 09:49:45PM -0700, Hugh Dickins wrote:
> > > Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more
> > > reliable result with PAE (or READ_ONCE as before without PAE); and remove
> > > the unnecessary extra barrier()s which got left behind in its callers.
> > 
> > Pure question: does it mean that some of the paths below (the ones
> > missing barrier()) could have a problem with CONFIG_PAE, hence this can
> > be seen as a (potential) bug fix?
> 
> I don't think so; or at least, I am not claiming that this fixes any.
> 
> It really depends on what use is made of the pmdval afterwards, and
> I've not checked through them.  The READ_ONCE()s which were there,
> were good enough to make sure that the compiler did not reevaluate
> the pmdval later on, with perhaps a confusingly different result.
> 
> But, at least in the x86 PAE case, they were not good enough to ensure
> that the two halves of the entry match up; and, sad to say, nor is that
> absolutely guaranteed by these conversions to pmdp_get_lockless() -
> because of the "HOWEVER" below.  PeterZ's comments in linux/pgtable.h
> are well worth reading through.

Yes, exactly - that's one major source of my confusion about using
{ptep|pmdp}_get_lockless().

In non-irqoff context, AFAICT we can see a totally messed up pte/pmd
with the present bit set if we are extremely unlucky. E.g. the read can
race with something like "DONTNEED (which contains a tlbflush) then a
POPULATE_WRITE", giving a "present -> present" conversion of the pte
while it is being read, so we can read one half from pfn1 and the other
half from pfn2.
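
A constructed worst case, using the low/high retry read sketched
earlier (pfn1 and pfn2 are assumed to give entries whose low 32 bits
happen to be equal, e.g. physical addresses that differ only above
bit 31):

	lockless reader			updater
	---------------			-------
	lo = low word		(pfn1 entry, present)
					MADV_DONTNEED zaps the entry; the
					tlbflush IPI is not blocked, since
					the reader has interrupts on
	hi = high word		(of the zapped or refilled state)
					MADV_POPULATE_WRITE faults in pfn2
	recheck low word	(equals lo: no retry)

	result: pfn1's low word paired with a high word it was never
	stored with - a present-looking entry pointing at the wrong page.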

The other confusing thing about this _lockless trick on PAE is that I
think it _might_ go wrong with devmap...

The problem is that here we assumed that, even if the high & low halves
may not match, we can still rely on most pte/pmd checks being done only
on the low bits (except the _none() check), which guarantees that at
least those checks are still atomic on the low bits.
But it seems to me that's not true anymore for pmd_trans_huge() after
devmap was introduced, e.g.:

static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */

#define _PAGE_DEVMAP	(_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define _PAGE_BIT_DEVMAP	_PAGE_BIT_SOFTW4
#define _PAGE_BIT_SOFTW4	58	/* available for programmer */

So after devmap, with CONFIG_PAE, pmd_trans_huge() checks not only the
low bits but also the high bits.  I didn't go further to check whether
there can be any real issue, but IIUC that's not what was expected when
the low/high trick was introduced (originally in commit
e585513b76f7b05d, solely for x86 PAE fast-gup).
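
Laid out against the 32-bit split of a PAE entry (illustrative):

	bit 63 ............ bit 32 | bit 31 ............ bit 0
	[       high word        ] [        low word        ]
	_PAGE_DEVMAP = bit 58  ->  high word
	_PAGE_PSE    = bit 7   ->  low word

So (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE mixes one bit
from each word: a torn read could combine a low word with PSE set from
one entry with a stale high word's DEVMAP bit from another, making
pmd_trans_huge() return an answer that is true of neither entry.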

> 
> You might question why I made these changes at all: some days
> I question them too.  Better though imperfect?  Or deceptive?

I think it's probably a separate topic to address in any case, so I
think this patch still makes things slightly better regarding
barrier(), which I agree with:

Acked-by: Peter Xu <peterx@redhat.com>

Thanks,

Patch

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 0fd96d6e39ce..f7a0817b1ec0 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -349,15 +349,7 @@  static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	if (!pud_present(*pud))
 		goto out;
 	pmd = pmd_offset(pud, address);
-	/*
-	 * READ_ONCE must function as a barrier with narrower scope
-	 * and it must be equivalent to:
-	 *	_pmd = *pmd; barrier();
-	 *
-	 * This is to deal with the instability (as in
-	 * pmd_trans_unstable) of the pmd.
-	 */
-	_pmd = READ_ONCE(*pmd);
+	_pmd = pmdp_get_lockless(pmd);
 	if (pmd_none(_pmd))
 		goto out;
 
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c5a51481bbb9..8ec27fe69dc8 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1344,23 +1344,6 @@  static inline int pud_trans_unstable(pud_t *pud)
 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 {
 	pmd_t pmdval = pmdp_get_lockless(pmd);
-	/*
-	 * The barrier will stabilize the pmdval in a register or on
-	 * the stack so that it will stop changing under the code.
-	 *
-	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
-	 * pmdp_get_lockless is allowed to return a not atomic pmdval
-	 * (for example pointing to an hugepage that has never been
-	 * mapped in the pmd). The below checks will only care about
-	 * the low part of the pmd with 32bit PAE x86 anyway, with the
-	 * exception of pmd_none(). So the important thing is that if
-	 * the low part of the pmd is found null, the high part will
-	 * be also null or the pmd_none() check below would be
-	 * confused.
-	 */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	barrier();
-#endif
 	/*
 	 * !pmd_present() checks for pmd migration entries
 	 *
diff --git a/mm/gup.c b/mm/gup.c
index bbe416236593..3bd5d3854c51 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -653,11 +653,7 @@  static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 
 	pmd = pmd_offset(pudp, address);
-	/*
-	 * The READ_ONCE() will stabilize the pmdval in a register or
-	 * on the stack so that it will stop changing under the code.
-	 */
-	pmdval = READ_ONCE(*pmd);
+	pmdval = pmdp_get_lockless(pmd);
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
 	if (!pmd_present(pmdval))
diff --git a/mm/hmm.c b/mm/hmm.c
index 6a151c09de5e..e23043345615 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -332,7 +332,7 @@  static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	pmd_t pmd;
 
 again:
-	pmd = READ_ONCE(*pmdp);
+	pmd = pmdp_get_lockless(pmdp);
 	if (pmd_none(pmd))
 		return hmm_vma_walk_hole(start, end, -1, walk);
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6b9d39d65b73..732f9ac393fc 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -961,11 +961,6 @@  static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 		return SCAN_PMD_NULL;
 
 	pmde = pmdp_get_lockless(*pmd);
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
-	barrier();
-#endif
 	if (pmd_none(pmde))
 		return SCAN_PMD_NONE;
 	if (!pmd_present(pmde))
diff --git a/mm/ksm.c b/mm/ksm.c
index 0156bded3a66..df2aa281d49d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1194,8 +1194,7 @@  static int replace_page(struct vm_area_struct *vma, struct page *page,
 	 * without holding anon_vma lock for write.  So when looking for a
 	 * genuine pmde (in which to find pte), test present and !THP together.
 	 */
-	pmde = *pmd;
-	barrier();
+	pmde = pmdp_get_lockless(pmd);
 	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
 		goto out;
 
diff --git a/mm/memory.c b/mm/memory.c
index f69fbc251198..2eb54c0d5d3c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4925,18 +4925,9 @@  static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		 * So now it's safe to run pte_offset_map().
 		 */
 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
-		vmf->orig_pte = *vmf->pte;
+		vmf->orig_pte = ptep_get_lockless(vmf->pte);
 		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
 
-		/*
-		 * some architectures can have larger ptes than wordsize,
-		 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
-		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
-		 * accesses.  The code below just needs a consistent view
-		 * for the ifs and we later double check anyway with the
-		 * ptl lock held. So here a barrier will do.
-		 */
-		barrier();
 		if (pte_none(vmf->orig_pte)) {
 			pte_unmap(vmf->pte);
 			vmf->pte = NULL;
@@ -5060,9 +5051,8 @@  static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
-		vmf.orig_pmd = *vmf.pmd;
+		vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
 
-		barrier();
 		if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
 			VM_BUG_ON(thp_migration_supported() &&
 					  !is_pmd_migration_entry(vmf.orig_pmd));
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 92d3d3ca390a..c5a13c0f1017 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -309,11 +309,6 @@  static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
 {
 	pmd_t pmdval = pmdp_get_lockless(pmd);
 
-	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	barrier();
-#endif
-
 	if (pmd_none(pmdval))
 		return 1;
 	if (pmd_trans_huge(pmdval))
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 4e448cfbc6ef..64aff6718bdb 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -210,7 +210,7 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		 * compiler and used as a stale value after we've observed a
 		 * subsequent update.
 		 */
-		pmde = READ_ONCE(*pvmw->pmd);
+		pmde = pmdp_get_lockless(pvmw->pmd);
 
 		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
 		    (pmd_present(pmde) && pmd_devmap(pmde))) {