@@ -672,11 +672,10 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 {
 	unsigned long pfn = pmd_pfn(pmd);
 
-	/*
-	 * There is no pmd_special() but there may be special pmds, e.g.
-	 * in a direct-access (dax) mapping, so let's just replicate the
-	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
-	 */
+	/* Currently it's only used for huge pfnmaps */
+	if (unlikely(pmd_special(pmd)))
+		return NULL;
+
 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
 		if (vma->vm_flags & VM_MIXEDMAP) {
 			if (!pfn_valid(pfn))
@@ -783,7 +783,7 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		fw->pmdp = pmdp;
 		fw->pmd = pmd;
 
-		if (pmd_none(pmd) || pmd_special(pmd)) {
+		if (pmd_none(pmd)) {
 			spin_unlock(ptl);
 			goto not_found;
 		} else if (!pmd_leaf(pmd)) {
Per David's suggestion, remove the stale comment in vm_normal_page_pmd(),
as pmds can now have the special bit set too.  Meanwhile, move the
pmd_special() check here from folio_walk_start().

Signed-off-by: Peter Xu <peterx@redhat.com>
---
Andrew, would you consider squashing this patch into the commit
"mm/pagewalk: Check pfnmap for folio_walk_start()" in mm-unstable? This is
so far the only thing I plan to update on the huge pfnmap series, thanks.
---
 mm/memory.c   | 9 ++++-----
 mm/pagewalk.c | 2 +-
 2 files changed, 5 insertions(+), 6 deletions(-)
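
For context on why dropping the check from folio_walk_start() is safe:
the present pmd-leaf path there already resolves the page through
vm_normal_page_pmd(), which after this patch performs the pmd_special()
check itself and returns NULL for huge pfnmaps.  Below is a simplified
sketch of that path, abridged from the folio_walk_start() flow in
mm/pagewalk.c (zero-page handling and surrounding control flow omitted),
not the verbatim kernel code:

	/* pmd-leaf path of folio_walk_start(), simplified */
	ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd = pmdp_get(pmdp);

	fw->level = FW_LEVEL_PMD;
	fw->pmdp = pmdp;
	fw->pmd = pmd;

	if (pmd_none(pmd)) {
		spin_unlock(ptl);
		goto not_found;
	} else if (!pmd_leaf(pmd)) {
		spin_unlock(ptl);
		goto pte_table;
	} else if (pmd_present(pmd)) {
		/*
		 * vm_normal_page_pmd() now returns NULL for special
		 * pmds (huge pfnmaps), so those still end up at
		 * not_found, exactly as the removed explicit
		 * pmd_special() check used to arrange.
		 */
		page = vm_normal_page_pmd(vma, addr, pmd);
		if (page)
			goto found;
		spin_unlock(ptl);
		goto not_found;
	}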