@@ -198,6 +198,17 @@ static inline bool folio_is_fsdax(const struct folio *folio)
return is_fsdax_page(&folio->page);
}
+static inline bool is_devdax_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page_pgmap(page)->type == MEMORY_DEVICE_GENERIC;
+}
+
+static inline bool folio_is_devdax(const struct folio *folio)
+{
+ return is_devdax_page(&folio->page);
+}
+
#ifdef CONFIG_ZONE_DEVICE
void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
@@ -884,6 +884,12 @@ struct folio *folio_walk_start(struct folio_walk *fw,
* support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
*/
page = pud_page(pud);
+
+ if (is_devdax_page(page)) {
+ spin_unlock(ptl);
+ goto not_found;
+ }
+
goto found;
}
@@ -911,7 +917,8 @@ struct folio *folio_walk_start(struct folio_walk *fw,
goto pte_table;
} else if (pmd_present(pmd)) {
page = vm_normal_page_pmd(vma, addr, pmd);
- if (page) {
+ if (page && !is_devdax_page(page) &&
+ !is_fsdax_page(page)) {
goto found;
} else if ((flags & FW_ZEROPAGE) &&
is_huge_zero_pmd(pmd)) {
@@ -945,7 +952,8 @@ struct folio *folio_walk_start(struct folio_walk *fw,
if (pte_present(pte)) {
page = vm_normal_page(vma, addr, pte);
- if (page)
+ if (page && !is_devdax_page(page) &&
+ !is_fsdax_page(page))
goto found;
if ((flags & FW_ZEROPAGE) &&
is_zero_pfn(pte_pfn(pte))) {
Previously dax pages were skipped by the pagewalk code as pud_special() or
vm_normal_page{_pmd}() would be false for DAX pages. Now that dax pages are
refcounted normally that is no longer the case, so add explicit checks to
skip them.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 include/linux/memremap.h | 11 +++++++++++
 mm/pagewalk.c            | 12 ++++++++++--
 2 files changed, 21 insertions(+), 2 deletions(-)