
[RFC,08/12] arm64: mm: remap kernel PTE level page tables r/o in the linear region

Message ID 20220126173011.3476262-9-ardb@kernel.org (mailing list archive)
State New, archived
Series: arm64: implement read-only page tables

Commit Message

Ard Biesheuvel Jan. 26, 2022, 5:30 p.m. UTC
Now that all kernel page table manipulations are routed through the
fixmap API when r/o page tables are enabled, we can remove write access
from the linear mapping of the page table pages themselves.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/pgalloc.h |  6 +++++
 arch/arm64/mm/mmu.c              | 24 +++++++++++++++++++-
 2 files changed, 29 insertions(+), 1 deletion(-)
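
For context, page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw(),
which appear in the hunks below, are introduced by earlier patches in this
series and are not part of this patch. As a rough sketch only, assuming the
permission flip is layered on the existing set_memory_ro()/set_memory_rw()
primitives (the helpers the series actually adds may differ), changing the
linear-map permission of a single page-table page amounts to:

#include <linux/set_memory.h>

/* Illustrative only: assumed shape of the helpers added earlier in the series */
static void set_pgtable_ro(void *addr)
{
	/* a page-table page is exactly one page in the linear map */
	set_memory_ro((unsigned long)addr, 1);
}

static void set_pgtable_rw(void *addr)
{
	set_memory_rw((unsigned long)addr, 1);
}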

Patch

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 18a5bb0c9ee4..073482634e74 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -20,6 +20,9 @@ 
 #define __HAVE_ARCH_PMD_FREE
 #define __HAVE_ARCH_PTE_ALLOC_ONE
 #define __HAVE_ARCH_PTE_FREE
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_FREE_KERNEL
+
 #include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
@@ -27,6 +30,9 @@ 
 pgtable_t pte_alloc_one(struct mm_struct *mm);
 void pte_free(struct mm_struct *mm, struct page *pte_page);
 
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+
 #if CONFIG_PGTABLE_LEVELS > 2
 
 pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 949846654797..971501535757 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1402,7 +1402,7 @@  int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 	table = pte_offset_kernel(pmdp, addr);
 	pmd_clear(pmdp);
 	__flush_tlb_kernel_pgtable(addr);
-	pte_free_kernel(NULL, table);
+	pte_free_kernel(&init_mm, table);
 	return 1;
 }
 
@@ -1709,3 +1709,25 @@  void pte_free(struct mm_struct *mm, struct page *pte_page)
 	pgtable_pte_page_dtor(pte_page);
 	__free_page(pte_page);
 }
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	pte_t *pte = __pte_alloc_one_kernel(mm);
+
+	VM_BUG_ON(mm != &init_mm);
+
+	if (!pte)
+		return NULL;
+	if (page_tables_are_ro())
+		set_pgtable_ro(pte);
+	return pte;
+}
+
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	VM_BUG_ON(mm != &init_mm);
+
+	if (page_tables_are_ro())
+		set_pgtable_rw(pte);
+	free_page((u64)pte);
+}
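
Once the linear alias of a PTE-level table is read-only, any subsequent
update has to go through a transient writable alias, which this series
routes through the fixmap. Purely as an illustration (the helper name
below is hypothetical; only FIX_PTE, set_fixmap_offset() and
clear_fixmap() are existing interfaces, mirroring the
pte_set_fixmap()/pte_clear_fixmap() pattern already used by the early
mapping code in arch/arm64/mm/mmu.c), a single kernel PTE store could
then look like this:

#include <linux/compiler.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>

/* Hypothetical helper, not part of this patch */
static void fixmap_set_kernel_pte(pte_t *ptep, pte_t pte)
{
	pte_t *alias;

	/* map the physical page holding ptep at the FIX_PTE slot, writable */
	alias = (pte_t *)set_fixmap_offset(FIX_PTE, __pa(ptep));

	/* the store goes through the r/w alias, not the r/o linear map */
	WRITE_ONCE(*alias, pte);
	dsb(ishst);		/* order the update before subsequent walks */

	/* tear the temporary alias down again */
	clear_fixmap(FIX_PTE);
}

The exact hook point the earlier patches in the series use for this write
path is not shown in this patch.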