
[RFC,04/12] arm64: mm: remap PGD pages r/o in the linear region after allocation

Message ID 20220126173011.3476262-5-ardb@kernel.org
State RFC
Series arm64: implement read-only page tables

Commit Message

Ard Biesheuvel Jan. 26, 2022, 5:30 p.m. UTC
As the first step in restricting write access to all page tables via the
linear mapping, remap the page at the root PGD level of a user space
page table hierarchy read-only after allocation, so that it can only be
manipulated using the dedicated fixmap-based API.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
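Note: page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw() are
introduced by earlier patches in this series (not shown here). A
minimal sketch of what they might amount to, assuming a hypothetical
static key as the enablement check and the generic set_memory_*() API
(which on arm64 would have to accept linear map addresses for this to
work as written):

    #include <linux/jump_label.h>
    #include <linux/set_memory.h>

    /* hypothetical key, flipped at boot when the feature is enabled */
    DECLARE_STATIC_KEY_FALSE(ro_page_tables);

    static inline bool page_tables_are_ro(void)
    {
    	return static_branch_unlikely(&ro_page_tables);
    }

    static inline void set_pgtable_ro(void *addr)
    {
    	/* remap the linear alias of this page table page read-only */
    	set_memory_ro((unsigned long)addr, 1);
    }

    static inline void set_pgtable_rw(void *addr)
    {
    	/* restore write access, e.g. before the page is freed */
    	set_memory_rw((unsigned long)addr, 1);
    }
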
 arch/arm64/mm/mmu.c |  7 ++++--
 arch/arm64/mm/pgd.c | 25 ++++++++++++++------
 2 files changed, 23 insertions(+), 9 deletions(-)
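The "dedicated fixmap-based API" referred to above is introduced
elsewhere in the series; purely as an illustration, an update routed
through the existing FIX_PGD fixmap slot (pgd_set_fixmap() and
pgd_clear_fixmap() from <asm/pgtable.h>) would look roughly like this.
pgd_write_via_fixmap() is a hypothetical name, and real code would
also have to serialize use of the slot:

    /* map the PGD page writable at the fixmap slot, write, unmap */
    static void pgd_write_via_fixmap(phys_addr_t pgd_phys, int idx, pgd_t val)
    {
    	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);

    	WRITE_ONCE(pgdp[idx], val);
    	dsb(ishst);	/* make the store visible to the table walker */
    	pgd_clear_fixmap();
    }
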

Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index acfae9b41cc8..a52c3162beae 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -394,8 +394,11 @@  static phys_addr_t __pgd_pgtable_alloc(int shift)
 {
 	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
 	BUG_ON(!ptr);
 
-	/* Ensure the zeroed page is visible to the page table walker */
-	dsb(ishst);
+	if (page_tables_are_ro())
+		set_pgtable_ro(ptr);
+	else
+		/* Ensure the zeroed page is visible to the page table walker */
+		dsb(ishst);
 	return __pa(ptr);
 }
 
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 4a64089e5771..637d6eceeada 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -9,8 +9,10 @@ 
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/set_memory.h>
 #include <linux/slab.h>
 
+#include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
@@ -20,24 +22,33 @@  static struct kmem_cache *pgd_cache __ro_after_init;
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	gfp_t gfp = GFP_PGTABLE_USER;
+	pgd_t *pgd;
 
-	if (PGD_SIZE == PAGE_SIZE)
-		return (pgd_t *)__get_free_page(gfp);
-	else
+	if (PGD_SIZE < PAGE_SIZE && !page_tables_are_ro())
 		return kmem_cache_alloc(pgd_cache, gfp);
+
+	pgd = (pgd_t *)__get_free_page(gfp);
+	if (!pgd)
+		return NULL;
+	if (page_tables_are_ro())
+		set_pgtable_ro(pgd);
+	return pgd;
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	if (PGD_SIZE == PAGE_SIZE)
-		free_page((unsigned long)pgd);
-	else
+	if (PGD_SIZE < PAGE_SIZE && !page_tables_are_ro()) {
 		kmem_cache_free(pgd_cache, pgd);
+	} else {
+		if (page_tables_are_ro())
+			set_pgtable_rw(pgd);
+		free_page((unsigned long)pgd);
+	}
 }
 
 void __init pgtable_cache_init(void)
 {
-	if (PGD_SIZE == PAGE_SIZE)
+	if (PGD_SIZE == PAGE_SIZE || page_tables_are_ro())
 		return;
 
 #ifdef CONFIG_ARM64_PA_BITS_52