diff mbox

[12/15] arm: prepare mmu code for arm64

Message ID 1418241608-13966-13-git-send-email-drjones@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Andrew Jones Dec. 10, 2014, 8 p.m. UTC
* don't assume a 1G PGDIR_SIZE or an L1_CACHE_BYTES-aligned pgd
* use page-level descriptors for non-I/O memory
* apply new pgd/pud/pmd/pte methods
* split mmu.h to share function declarations
* use more generic flag names in mmu.c

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/arm/asm/mmu-api.h | 14 +++++++++++
 lib/arm/asm/mmu.h     | 10 +++++---
 lib/arm/asm/setup.h   |  3 +++
 lib/arm/mmu.c         | 69 +++++++++++++++++++++++++++++++++++++--------------
 4 files changed, 74 insertions(+), 22 deletions(-)
 create mode 100644 lib/arm/asm/mmu-api.h
diff mbox

Patch

diff --git a/lib/arm/asm/mmu-api.h b/lib/arm/asm/mmu-api.h
new file mode 100644
index 0000000000000..f2511e3dc7dee
--- /dev/null
+++ b/lib/arm/asm/mmu-api.h
@@ -0,0 +1,14 @@ 
+#ifndef __ASMARM_MMU_API_H_
+#define __ASMARM_MMU_API_H_
+extern pgd_t *mmu_idmap;
+extern bool mmu_enabled(void);
+extern void mmu_enable(pgd_t *pgtable);
+extern void mmu_enable_idmap(void);
+extern void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset);
+extern void mmu_set_range_sect(pgd_t *pgtable, unsigned long virt_offset,
+			       unsigned long phys_start, unsigned long phys_end,
+			       pgprot_t prot);
+extern void mmu_set_range_ptes(pgd_t *pgtable, unsigned long virt_offset,
+			       unsigned long phys_start, unsigned long phys_end,
+			       pgprot_t prot);
+#endif
diff --git a/lib/arm/asm/mmu.h b/lib/arm/asm/mmu.h
index 254c29f84fe6f..5ec7a6ce5886b 100644
--- a/lib/arm/asm/mmu.h
+++ b/lib/arm/asm/mmu.h
@@ -8,6 +8,11 @@ 
 #include <asm/pgtable.h>
 #include <asm/barrier.h>
 
+#define PTE_USER		L_PTE_USER
+#define PTE_SHARED		L_PTE_SHARED
+#define PTE_AF			PTE_EXT_AF
+#define PTE_WBWA		L_PTE_MT_WRITEALLOC
+
 static inline void local_flush_tlb_all(void)
 {
 	asm volatile("mcr p15, 0, %0, c8, c7, 0" :: "r" (0));
@@ -21,9 +26,6 @@  static inline void flush_tlb_all(void)
 	local_flush_tlb_all();
 }
 
-extern bool mmu_enabled(void);
-extern void mmu_enable(pgd_t *pgtable);
-extern void mmu_enable_idmap(void);
-extern void mmu_init_io_sect(pgd_t *pgtable);
+#include <asm/mmu-api.h>
 
 #endif /* __ASMARM_MMU_H_ */
diff --git a/lib/arm/asm/setup.h b/lib/arm/asm/setup.h
index 450501cc6e8e3..02b668672fca4 100644
--- a/lib/arm/asm/setup.h
+++ b/lib/arm/asm/setup.h
@@ -17,6 +17,9 @@  extern phys_addr_t __phys_offset, __phys_end;
 
 #define PHYS_OFFSET		(__phys_offset)
 #define PHYS_END		(__phys_end)
+/* mach-virt reserves the first 1G section for I/O */
+#define PHYS_IO_OFFSET		(0UL)
+#define PHYS_IO_END		(1UL << 30)
 
 #define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index 7a975c6708de4..55d18a10e1ebd 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -8,9 +8,9 @@ 
 #include <asm/setup.h>
 #include <asm/mmu.h>
 
-static bool mmu_on;
-static pgd_t idmap[PTRS_PER_PGD] __attribute__((aligned(L1_CACHE_BYTES)));
+pgd_t *mmu_idmap;
 
+static bool mmu_on;
 bool mmu_enabled(void)
 {
 	return mmu_on;
@@ -24,29 +24,62 @@  void mmu_enable(pgd_t *pgtable)
 	mmu_on = true;
 }
 
-void mmu_init_io_sect(pgd_t *pgtable)
+void mmu_set_range_ptes(pgd_t *pgtable, unsigned long virt_offset,
+			unsigned long phys_start, unsigned long phys_end,
+			pgprot_t prot)
 {
-	/*
-	 * mach-virt reserves the first 1G section for I/O
-	 */
-	pgd_val(pgtable[0]) = PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_USER;
-	pgd_val(pgtable[0]) |= PMD_SECT_UNCACHED;
+	unsigned long vaddr = virt_offset & PAGE_MASK;
+	unsigned long paddr = phys_start & PAGE_MASK;
+	unsigned long virt_end = phys_end - paddr + vaddr;
+
+	for (; vaddr < virt_end; vaddr += PAGE_SIZE, paddr += PAGE_SIZE) {
+		pgd_t *pgd = pgd_offset(pgtable, vaddr);
+		pud_t *pud = pud_alloc(pgd, vaddr);
+		pmd_t *pmd = pmd_alloc(pud, vaddr);
+		pte_t *pte = pte_alloc(pmd, vaddr);
+
+		pte_val(*pte) = paddr;
+		pte_val(*pte) |= PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
+		pte_val(*pte) |= pgprot_val(prot);
+	}
+}
+
+void mmu_set_range_sect(pgd_t *pgtable, unsigned long virt_offset,
+			unsigned long phys_start, unsigned long phys_end,
+			pgprot_t prot)
+{
+	unsigned long vaddr = virt_offset & PGDIR_MASK;
+	unsigned long paddr = phys_start & PGDIR_MASK;
+	unsigned long virt_end = phys_end - paddr + vaddr;
+
+	for (; vaddr < virt_end; vaddr += PGDIR_SIZE, paddr += PGDIR_SIZE) {
+		pgd_t *pgd = pgd_offset(pgtable, vaddr);
+		pgd_val(*pgd) = paddr;
+		pgd_val(*pgd) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S;
+		pgd_val(*pgd) |= pgprot_val(prot);
+	}
+}
+
+
+void mmu_init_io_sect(pgd_t *pgtable, unsigned long virt_offset)
+{
+	mmu_set_range_sect(pgtable, virt_offset,
+		PHYS_IO_OFFSET, PHYS_IO_END,
+		__pgprot(PMD_SECT_UNCACHED | PMD_SECT_USER));
 }
 
 void mmu_enable_idmap(void)
 {
-	unsigned long sect, end;
+	unsigned long phys_end = sizeof(long) == 8 || !(PHYS_END >> 32)
+						? PHYS_END : 0xfffff000;
 
-	mmu_init_io_sect(idmap);
+	mmu_idmap = pgd_alloc();
 
-	end = sizeof(long) == 8 || !(PHYS_END >> 32) ? PHYS_END : 0xfffff000;
+	mmu_init_io_sect(mmu_idmap, PHYS_IO_OFFSET);
 
-	for (sect = PHYS_OFFSET & PGDIR_MASK; sect < end; sect += PGDIR_SIZE) {
-		int i = sect >> PGDIR_SHIFT;
-		pgd_val(idmap[i]) = sect;
-		pgd_val(idmap[i]) |= PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_USER;
-		pgd_val(idmap[i]) |= PMD_SECT_S | PMD_SECT_WBWA;
-	}
+	mmu_set_range_ptes(mmu_idmap, PHYS_OFFSET,
+		PHYS_OFFSET, phys_end,
+		__pgprot(PTE_WBWA | PTE_USER));
 
-	mmu_enable(idmap);
+	mmu_enable(mmu_idmap);
 }