The new flag makes it possible to bypass the poison check and get a
reference on the page even if it is hwpoisoned.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/mm.h |  1 +
 mm/gup.c           | 29 ++++++++++++++++++-----------
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2802,6 +2802,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
 #define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
 #define FOLL_FAST_ONLY	0x80000	/* gup_fast: prevent fall-back to slow gup */
+#define FOLL_ALLOW_POISONED	0x100000 /* bypass poison check */
 
 /*
  * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -384,22 +384,29 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	pte = *ptep;
 	if (!pte_present(pte)) {
-		swp_entry_t entry;
+		swp_entry_t entry = pte_to_swp_entry(pte);
+
+		if (pte_none(pte))
+			goto no_page;
+
 		/*
 		 * KSM's break_ksm() relies upon recognizing a ksm page
 		 * even while it is being migrated, so for that case we
 		 * need migration_entry_wait().
 		 */
-		if (likely(!(flags & FOLL_MIGRATION)))
-			goto no_page;
-		if (pte_none(pte))
-			goto no_page;
-		entry = pte_to_swp_entry(pte);
-		if (!is_migration_entry(entry))
-			goto no_page;
-		pte_unmap_unlock(ptep, ptl);
-		migration_entry_wait(mm, pmd, address);
-		goto retry;
+		if (is_migration_entry(entry) && (flags & FOLL_MIGRATION)) {
+			pte_unmap_unlock(ptep, ptl);
+			migration_entry_wait(mm, pmd, address);
+			goto retry;
+		}
+
+		if (is_hwpoison_entry(entry) && (flags & FOLL_ALLOW_POISONED)) {
+			page = hwpoison_entry_to_page(entry);
+			get_page(page);
+			goto out;
+		}
+
+		goto no_page;
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
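Note: hwpoison_entry_to_page() does not exist upstream and is assumed
to be introduced earlier in this series. A minimal sketch, assuming
hwpoison swap entries keep the pfn in their offset field the way
make_hwpoison_entry() stores it (mirroring migration_entry_to_page()),
could look like:

/*
 * Sketch only (assumed helper, not upstream): resolve a hwpoison swap
 * entry back to the struct page it refers to. The pfn is taken from
 * the entry's offset, where make_hwpoison_entry() put it.
 */
static inline struct page *hwpoison_entry_to_page(swp_entry_t entry)
{
	return pfn_to_page(swp_offset(entry));
}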
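For illustration, a hypothetical in-kernel user of the new flag could
look like the sketch below. The helper name read_poisoned_page() is an
assumption made up for the example; get_user_pages_remote(), FOLL_GET
and the new FOLL_ALLOW_POISONED are the only real identifiers in it.
Without the new flag, gup fails on a hwpoisoned page instead of
returning it.

/*
 * Hypothetical caller sketch: take a reference on a user page even if
 * it has been hw-poisoned, so its contents can still be inspected.
 * The caller must drop the reference with put_page() when done.
 */
static int read_poisoned_page(struct mm_struct *mm, unsigned long addr,
			      struct page **pagep)
{
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, addr, 1,
				    FOLL_GET | FOLL_ALLOW_POISONED,
				    pagep, NULL, NULL);
	mmap_read_unlock(mm);

	return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
}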