--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -46,6 +46,8 @@ struct mm_struct;
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte, unsigned int nr);
#define set_ptes set_ptes
+void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -165,7 +165,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
ptep = pte_alloc_kernel(pmdp, ea);
if (!ptep)
return -ENOMEM;
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
+ set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
} else {
/*
* If the mm subsystem is not fully up, we cannot create a
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -116,7 +116,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pmd_leaf(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
- return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
+ return set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
void set_pud_at(struct mm_struct *mm, unsigned long addr,
@@ -133,7 +133,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(!(pud_leaf(pud)));
#endif
trace_hugepage_set_pud(addr, pud_val(pud));
- return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
+ return set_pte_at_unchecked(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}
static void do_serialize(void *arg)
@@ -539,7 +539,7 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
if (radix_enabled())
return radix__ptep_modify_prot_commit(vma, addr,
ptep, old_pte, pte);
- set_pte_at(vma->vm_mm, addr, ptep, pte);
+ set_pte_at_unchecked(vma->vm_mm, addr, ptep, pte);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -109,7 +109,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
ptep = pte_offset_kernel(pmdp, ea);
set_the_pte:
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
+ set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pfn, flags));
asm volatile("ptesync": : :"memory");
return 0;
}
@@ -1522,7 +1522,7 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
(atomic_read(&mm->context.copros) > 0))
radix__flush_tlb_page(vma, addr);
- set_pte_at(mm, addr, ptep, pte);
+ set_pte_at_unchecked(mm, addr, ptep, pte);
}
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
@@ -1533,7 +1533,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
if (!radix_enabled())
return 0;
- set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+ set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pud);
return 1;
}
@@ -1580,7 +1580,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
if (!radix_enabled())
return 0;
- set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+ set_pte_at_unchecked(&init_mm, 0 /* radix unused */, ptep, new_pmd);
return 1;
}
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -111,7 +111,7 @@ int __ref map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot)
}
ptep = pte_offset_kernel(pmdp, ea);
}
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
+ set_pte_at_unchecked(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
smp_wmb();
return 0;
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -224,6 +224,14 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
}
}
+void set_pte_at_unchecked(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+ pte = set_pte_filter(pte, addr);
+ __set_pte_at(mm, addr, ptep, pte, 0);
+}
+
void unmap_kernel_page(unsigned long va)
{
pmd_t *pmdp = pmd_off_k(va);
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -89,7 +89,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
* hash table
*/
BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
- set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
+ set_pte_at_unchecked(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
}
smp_wmb();
return err;
In the new set_ptes() API, set_pte_at() (a special case of set_ptes())
is intended to be instrumented by the page table check facility. There
are however several other routines that constitute the API for setting
page table entries, including set_pmd_at() among others. Such routines
are themselves implemented in terms of set_pte_at(). A future patch
providing support for page table checking on powerpc must take care to
avoid duplicate calls to page_table_check_p{te,md,ud}_set().

Allow for assignment of pte entries without instrumentation through the
set_pte_at_unchecked() routine introduced in this patch. Cause
API-facing routines that call set_pte_at() to instead call
set_pte_at_unchecked(), which will remain uninstrumented by page table
check. set_ptes() is itself implemented by calls to __set_pte_at(), so
this eliminates redundant code.

Also prefer set_pte_at_unchecked() in early-boot usages which should
not be instrumented.

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
---
v9: New patch

v10: Don't reuse __set_pte_at(), as that will not apply filters.
Instead use the new set_pte_at_unchecked().

v11: Include the assertion that hwvalid => !protnone. It is possible
that some of these calls can be safely replaced with __set_pte_at(),
however that will have to be done at a later stage.
---
 arch/powerpc/include/asm/pgtable.h       | 2 ++
 arch/powerpc/mm/book3s64/hash_pgtable.c  | 2 +-
 arch/powerpc/mm/book3s64/pgtable.c       | 6 +++---
 arch/powerpc/mm/book3s64/radix_pgtable.c | 8 ++++----
 arch/powerpc/mm/nohash/book3e_pgtable.c  | 2 +-
 arch/powerpc/mm/pgtable.c                | 8 ++++++++
 arch/powerpc/mm/pgtable_32.c             | 2 +-
 7 files changed, 20 insertions(+), 10 deletions(-)
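
For reference, below is a rough sketch of how the instrumented and
uninstrumented paths are expected to divide the work once a follow-up
patch enables page table check on powerpc. This is illustration only,
not code from this series: the page_table_check_*_set() hook names and
signatures follow the generic mm/page_table_check facility and may
differ in the eventual powerpc enablement, and checked_set_pte_at() is
a hypothetical wrapper introduced purely for the example.

	/*
	 * Illustrative sketch only. Each entry is accounted exactly once:
	 * pte-level setters go through the instrumented wrapper, while
	 * pmd-level setters perform their own level-specific check and
	 * then write the entry through the unchecked helper.
	 */
	static inline void checked_set_pte_at(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, pte_t pte)
	{
		/* pte-level accounting happens here, exactly once */
		page_table_check_ptes_set(mm, ptep, pte, 1);
		set_pte_at_unchecked(mm, addr, ptep, pte);
	}

	void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			pmd_t *pmdp, pmd_t pmd)
	{
		/*
		 * pmd-level accounting only; the pte write below stays
		 * uninstrumented, so nothing is counted twice.
		 */
		page_table_check_pmd_set(mm, pmdp, pmd);
		set_pte_at_unchecked(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
	}

Because set_pte_at_unchecked() never calls into page table check, the
pmd/pud setters and the early-boot mapping paths converted above cannot
be double-counted when the instrumentation patch lands.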