@@ -47,6 +47,7 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
+#define flush_icache_pages(vma, page, nr) do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
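
The hunk above (arch/loongarch/include/asm/cacheflush.h) adds flush_icache_pages(), the multi-page counterpart the new page table range API expects each architecture to provide. Generic callers such as set_pte_range() in mm/memory.c invoke it as flush_icache_pages(vma, page, nr), so the stub has to accept all three arguments even though it discards them; the per-page icache hooks are no-ops on this architecture anyway.
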
@@ -50,12 +50,12 @@
#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
-#define _PFN_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
+#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)

#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT)
#define _PAGE_KERN (PLV_KERN << _PAGE_PLV_SHIFT)

-#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \
+#define _PFN_MASK (~((_ULCAST_(1) << (PFN_PTE_SHIFT)) - 1) & \
((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1))

/*
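
In pgtable-bits.h, _PFN_SHIFT is renamed to PFN_PTE_SHIFT, the name the generic page table code now looks for: once an architecture defines PFN_PTE_SHIFT, include/linux/pgtable.h can supply a default set_ptes() built on it, which is what makes the set_pte_at() removal further down possible. The layout itself is unchanged: the PFN occupies bits [PFN_PTE_SHIFT, _PAGE_PFN_END_SHIFT). Assuming the default 16KB-page configuration (PAGE_SHIFT = 14) with _PAGE_PFN_SHIFT = 12 and _PAGE_PFN_END_SHIFT = 48, PFN_PTE_SHIFT evaluates to 14 - 12 + 12 = 14, so _PFN_MASK selects bits 14-47.
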
@@ -237,9 +237,9 @@ extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
-#define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))

/*
* Initialize a new pgd / pud / pmd table with invalid pointers.
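
pte_pfn()/pfn_pte()/pfn_pmd() just switch to the new spelling; the encoding is untouched. As a sanity check, here is a minimal standalone C sketch of the pack/unpack round trip, assuming the default 16KB-page constants mentioned above (the constant values, and the bare uint64_t stand-in for pte_t, are illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Assumed defaults: 16KB pages (PAGE_SHIFT = 14), _PAGE_PFN_SHIFT = 12,
   _PAGE_PFN_END_SHIFT = 48, mirroring pgtable-bits.h above. */
#define PAGE_SHIFT          14
#define _PAGE_PFN_SHIFT     12
#define _PAGE_PFN_END_SHIFT 48

#define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT)
#define _PFN_MASK (~((1ULL << PFN_PTE_SHIFT) - 1) & \
                   ((1ULL << _PAGE_PFN_END_SHIFT) - 1))

int main(void)
{
	uint64_t pfn  = 0xabcde;
	uint64_t prot = 0x3;	/* low protection bits, below the PFN field */
	uint64_t pte  = (pfn << PFN_PTE_SHIFT) | prot;		/* pfn_pte() */
	uint64_t back = (pte & _PFN_MASK) >> PFN_PTE_SHIFT;	/* pte_pfn() */

	printf("PFN_PTE_SHIFT = %d\n", PFN_PTE_SHIFT);
	printf("pfn 0x%llx -> pte 0x%llx -> pfn 0x%llx\n",
	       (unsigned long long)pfn, (unsigned long long)pte,
	       (unsigned long long)back);
	return 0;
}

Built with cc, this prints PFN_PTE_SHIFT = 14 and recovers pfn 0xabcde intact.
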
@@ -334,19 +334,13 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
}
}

-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- set_pte(ptep, pteval);
-}
-
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
- set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
+ set_pte(ptep, __pte(_PAGE_GLOBAL));
else
- set_pte_at(mm, addr, ptep, __pte(0));
+ set_pte(ptep, __pte(0));
}

#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
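
This is the payoff hunk: with PFN_PTE_SHIFT defined, the generic include/linux/pgtable.h supplies set_ptes(), which loops set_pte() over nr consecutive pages while stepping the PFN, and defines set_pte_at(mm, addr, ptep, pte) as set_ptes(mm, addr, ptep, pte, 1). The arch-private set_pte_at() wrapper is therefore dead code, and pte_clear() calls set_pte() directly (its mm and addr arguments were never used here anyway). Below is a userspace model of the loop shape, to show why the per-arch burden reduces to the shift constant; set_ptes_model(), the fixed shift, and the toy pte_t are illustrative, and the real generic code additionally handles lazy-MMU mode and page table checking:

#include <stdio.h>
#include <stdint.h>

#define PFN_PTE_SHIFT 14	/* illustrative: 16KB pages, as computed above */

typedef struct { uint64_t pte; } pte_t;

static pte_t __pte(uint64_t v) { pte_t p = { v }; return p; }
static uint64_t pte_val(pte_t p) { return p.pte; }

/* Advance the PFN field by one page: the only arch knowledge set_ptes() needs. */
static pte_t pte_next_pfn(pte_t pte)
{
	return __pte(pte_val(pte) + (1ULL << PFN_PTE_SHIFT));
}

/* Model of the generic loop: write nr consecutive ptes for consecutive pfns. */
static void set_ptes_model(pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		*ptep = pte;	/* stands in for the arch's set_pte() */
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
}

int main(void)
{
	pte_t tbl[4] = {{ 0 }};
	unsigned int i;

	/* Map pfns 0x100..0x103 with the same low protection bits. */
	set_ptes_model(tbl, __pte((0x100ULL << PFN_PTE_SHIFT) | 0x3), 4);
	for (i = 0; i < 4; i++)
		printf("pte[%u]: pfn 0x%llx\n", i,
		       (unsigned long long)(pte_val(tbl[i]) >> PFN_PTE_SHIFT));
	return 0;
}

The decrement-before-test for (;;) form assumes nr >= 1, which matches how callers use the API; the same pattern reappears in update_mmu_cache_range() below.
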
@@ -445,11 +439,20 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern void __update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);

-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
{
- __update_tlb(vma, address, ptep);
+ for (;;) {
+ __update_tlb(vma, address, ptep);
+ if (--nr == 0)
+ break;
+ address += PAGE_SIZE;
+ ptep++;
+ }
}
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
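
update_mmu_cache() is demoted to a one-page wrapper around the new update_mmu_cache_range(), which runs __update_tlb() once per page. The vmf argument is part of the generic signature but unused on LoongArch, which is why the wrapper can pass NULL. Note the same nr >= 1 convention as set_ptes(): nr is decremented before the exit test, so a count of zero is not a valid call. update_mmu_tlb keeps aliasing the single-page wrapper via __HAVE_ARCH_UPDATE_MMU_TLB, so its behaviour is unchanged.
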
@@ -462,7 +465,7 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,

static inline unsigned long pmd_pfn(pmd_t pmd)
{
- return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
+ return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
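
pmd_pfn() reads the same PFN field out of a PMD-level entry (for a huge page it yields the first PFN of the mapping), so this rename is purely mechanical.
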
@@ -107,7 +107,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;

- pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+ pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);

return pmd;
}
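
Likewise mechanical: mk_pmd() in arch/loongarch/mm/pgtable.c packs a PFN into a PMD with the same shift, so only the name changes.
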
@@ -252,7 +252,7 @@ static void output_pgtable_bits_defines(void)
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
- pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
pr_debug("\n");
}
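
Finally, output_pgtable_bits_defines() in arch/loongarch/mm/tlb.c pr_define()s these constants into the debug log for the assembly TLB handlers, so the emitted name is updated to keep that output in sync with the header. With this hunk, no _PFN_SHIFT users remain in the LoongArch tree.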