@@ -340,14 +340,17 @@ static inline enum pte_flush_type pte_flags_flush_type(unsigned long oldflags,
* whether a strict or relaxed TLB flush is need. It should only be used on
* userspace PTEs.
*/
-static inline enum pte_flush_type pte_flush_type(pte_t oldpte, pte_t newpte)
+static inline enum pte_flush_type pte_flush_type(pte_t oldpte, pte_t newpte,
+ bool check_pfn)
{
/* !PRESENT -> * ; no need for flush */
if (!(pte_flags(oldpte) & _PAGE_PRESENT))
return PTE_FLUSH_NONE;
/* PFN changed ; needs flush */
- if (pte_pfn(oldpte) != pte_pfn(newpte))
+ if (!check_pfn)
+ VM_BUG_ON(pte_pfn(oldpte) != pte_pfn(newpte));
+ else if (pte_pfn(oldpte) != pte_pfn(newpte))
return PTE_FLUSH_STRICT;
/*
@@ -363,14 +366,17 @@ static inline enum pte_flush_type pte_flush_type(pte_t oldpte, pte_t newpte)
* huge_pmd_flush_type() checks whether permissions were demoted and require a
* flush. It should only be used for userspace huge PMDs.
*/
-static inline enum pte_flush_type huge_pmd_flush_type(pmd_t oldpmd, pmd_t newpmd)
+static inline enum pte_flush_type huge_pmd_flush_type(pmd_t oldpmd, pmd_t newpmd,
+ bool check_pfn)
{
/* !PRESENT -> * ; no need for flush */
if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
return PTE_FLUSH_NONE;
/* PFN changed ; needs flush */
- if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
+ if (!check_pfn)
+ VM_BUG_ON(pmd_pfn(oldpmd) != pmd_pfn(newpmd));
+ else if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
return PTE_FLUSH_STRICT;
/*
@@ -677,14 +677,16 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
#endif
#ifndef pte_flush_type
-static inline struct pte_flush_type pte_flush_type(pte_t oldpte, pte_t newpte)
+/*
+ * Generic fallback: without arch support we cannot tell whether the
+ * update is safe, so always request a strict flush. check_pfn is
+ * accepted for API parity with the arch implementation and ignored.
+ * Return type is enum pte_flush_type (the old "struct" was a typo:
+ * the function returns the enum constant PTE_FLUSH_STRICT).
+ */
+static inline enum pte_flush_type pte_flush_type(pte_t oldpte, pte_t newpte,
+						 bool check_pfn)
{
return PTE_FLUSH_STRICT;
}
#endif
#ifndef huge_pmd_flush_type
-static inline bool huge_pmd_flush_type(pmd_t oldpmd, pmd_t newpmd)
+/*
+ * Generic fallback: always strict. Returns enum pte_flush_type, not
+ * bool — callers compare the result against PTE_FLUSH_NONE /
+ * PTE_FLUSH_STRICT, and the arch implementation returns the enum;
+ * keeping "bool" here would silently truncate the enum. check_pfn is
+ * accepted for API parity and ignored.
+ */
+static inline enum pte_flush_type huge_pmd_flush_type(pmd_t oldpmd, pmd_t newpmd,
+						      bool check_pfn)
{
return PTE_FLUSH_STRICT;
}
#endif
@@ -1818,7 +1818,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
flush_type = PTE_FLUSH_STRICT;
if (!tlb->strict)
- flush_type = huge_pmd_flush_type(oldpmd, entry);
+ flush_type = huge_pmd_flush_type(oldpmd, entry, false);
if (flush_type != PTE_FLUSH_NONE)
tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE,
flush_type == PTE_FLUSH_STRICT);
@@ -204,7 +204,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
flush_type = PTE_FLUSH_STRICT;
if (!tlb->strict)
- flush_type = pte_flush_type(oldpte, ptent);
+ flush_type = pte_flush_type(oldpte, ptent, false);
if (flush_type != PTE_FLUSH_NONE)
tlb_flush_pte_range(tlb, addr, PAGE_SIZE,
flush_type == PTE_FLUSH_STRICT);
@@ -974,7 +974,7 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
entry = pte_wrprotect(oldpte);
entry = pte_mkclean(entry);
- if (pte_flush_type(oldpte, entry) != PTE_FLUSH_NONE ||
+ if (pte_flush_type(oldpte, entry, false) != PTE_FLUSH_NONE ||
mm_tlb_flush_pending(vma->vm_mm))
flush_tlb_page(vma, address);