@@ -10,6 +10,7 @@ extern void mmu_mark_enabled(int cpu);
extern void mmu_mark_disabled(int cpu);
extern void mmu_enable(pgd_t *pgtable);
extern void mmu_disable(void);
+extern void mmu_setup_early(phys_addr_t phys_end);
extern void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
phys_addr_t phys_start, phys_addr_t phys_end,
@@ -41,10 +41,16 @@
#define pgd_offset(pgtable, addr) ((pgtable) + pgd_index(addr))
#define pgd_free(pgd) free(pgd)
+static inline pgd_t *pgd_alloc_early(void)
+{
+ pgd_t *pgd = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pgd, 0, PAGE_SIZE);
+ return pgd;
+}
static inline pgd_t *pgd_alloc(void)
{
assert(PTRS_PER_PGD * sizeof(pgd_t) <= PAGE_SIZE);
- pgd_t *pgd = alloc_page();
+ pgd_t *pgd = page_alloc_initialized() ? alloc_page() : pgd_alloc_early();
return pgd;
}
@@ -65,10 +71,16 @@ static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
(pgd_page_vaddr(*(pgd)) + pmd_index(addr))
#define pmd_free(pmd) free_page(pmd)
+static inline pmd_t *pmd_alloc_one_early(void)
+{
+ pmd_t *pmd = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pmd, 0, PAGE_SIZE);
+ return pmd;
+}
static inline pmd_t *pmd_alloc_one(void)
{
assert(PTRS_PER_PMD * sizeof(pmd_t) == PAGE_SIZE);
- pmd_t *pmd = alloc_page();
+ pmd_t *pmd = page_alloc_initialized() ? alloc_page() : pmd_alloc_one_early();
return pmd;
}
static inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long addr)
@@ -92,10 +104,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
(pmd_page_vaddr(*(pmd)) + pte_index(addr))
#define pte_free(pte) free_page(pte)
+static inline pte_t *pte_alloc_one_early(void)
+{
+ pte_t *pte = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pte, 0, PAGE_SIZE);
+ return pte;
+}
static inline pte_t *pte_alloc_one(void)
{
assert(PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE);
- pte_t *pte = alloc_page();
+ pte_t *pte = page_alloc_initialized() ? alloc_page() : pte_alloc_one_early();
return pte;
}
static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
@@ -104,6 +122,6 @@ static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
pmd_t entry;
pmd_val(entry) = pgtable_pa(pte_alloc_one()) | PMD_TYPE_TABLE;
WRITE_ONCE(*pmd, entry);
}
return pte_offset(pmd, addr);
}
@@ -12,11 +12,10 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
-#include "alloc_page.h"
#include "vmalloc.h"
-#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <linux/compiler.h>
@@ -201,7 +200,7 @@ void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
}
}
-void *setup_mmu(phys_addr_t phys_end, void *unused)
+void mmu_setup_early(phys_addr_t phys_end)
{
struct mem_region *r;
@@ -216,9 +215,7 @@ void *setup_mmu(phys_addr_t phys_end, void *unused)
"Unsupported translation granule %ld\n", PAGE_SIZE);
#endif
- if (!mmu_idmap)
- mmu_idmap = pgd_alloc();
-
+ mmu_idmap = pgd_alloc();
for (r = mem_regions; r->end; ++r) {
if (r->flags & MR_F_IO) {
continue;
@@ -236,7 +233,22 @@ void *setup_mmu(phys_addr_t phys_end, void *unused)
}
}
- mmu_enable(mmu_idmap);
+ /*
+ * Open-code part of mmu_enable(), because at this point thread_info
+ * hasn't been initialized. mmu_mark_enabled() cannot be called here
+ * because the cpumask operations can only be called later, after
+ * nr_cpus is initialized in cpu_init().
+ */
+ asm_mmu_enable((phys_addr_t)(unsigned long)mmu_idmap);
+ current_thread_info()->pgtable = mmu_idmap;
+}
+
+void *setup_mmu(phys_addr_t phys_end, void *unused)
+{
+ assert(mmu_idmap);
+
+ mmu_mark_enabled(0);
+
return mmu_idmap;
}
@@ -26,6 +26,7 @@
#include <asm/smp.h>
#include <asm/timer.h>
#include <asm/psci.h>
+#include <asm/mmu.h>
#include "io.h"
@@ -226,6 +227,9 @@ static void mem_init(phys_addr_t freemem_start)
phys_alloc_init(freemem_start, freemem->end - freemem_start);
phys_alloc_set_minimum_alignment(SMP_CACHE_BYTES);
+ if (!(auxinfo.flags & AUXINFO_MMU_OFF))
+ mmu_setup_early(freemem->end);
+
phys_alloc_get_unused(&base, &top);
base = PAGE_ALIGN(base);
top = top & PAGE_MASK;
@@ -417,6 +421,9 @@ static efi_status_t efi_mem_init(efi_bootinfo_t *efi_bootinfo)
phys_alloc_init(free_mem_start, free_mem_pages << EFI_PAGE_SHIFT);
phys_alloc_set_minimum_alignment(SMP_CACHE_BYTES);
+ if (!(auxinfo.flags & AUXINFO_MMU_OFF))
+ mmu_setup_early(free_mem_start + (free_mem_pages << EFI_PAGE_SHIFT));
+
phys_alloc_get_unused(&base, &top);
base = PAGE_ALIGN(base);
top = top & PAGE_MASK;
@@ -47,10 +47,16 @@
#define pgd_offset(pgtable, addr) ((pgtable) + pgd_index(addr))
#define pgd_free(pgd) free(pgd)
+static inline pgd_t *pgd_alloc_early(void)
+{
+ pgd_t *pgd = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pgd, 0, PAGE_SIZE);
+ return pgd;
+}
static inline pgd_t *pgd_alloc(void)
{
assert(PTRS_PER_PGD * sizeof(pgd_t) <= PAGE_SIZE);
- pgd_t *pgd = alloc_page();
+ pgd_t *pgd = page_alloc_initialized() ? alloc_page() : pgd_alloc_early();
return pgd;
}
@@ -75,10 +81,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_offset(pud, addr) \
(pud_page_vaddr(*(pud)) + pmd_index(addr))
#define pmd_free(pmd) free_page(pmd)
+static inline pmd_t *pmd_alloc_one_early(void)
+{
+ pmd_t *pmd = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pmd, 0, PAGE_SIZE);
+ return pmd;
+}
static inline pmd_t *pmd_alloc_one(void)
{
assert(PTRS_PER_PMD * sizeof(pmd_t) == PAGE_SIZE);
- pmd_t *pmd = alloc_page();
+ pmd_t *pmd = page_alloc_initialized() ? alloc_page() : pmd_alloc_one_early();
return pmd;
}
static inline pmd_t *pmd_alloc(pud_t *pud, unsigned long addr)
@@ -102,10 +114,16 @@ static inline pmd_t *pmd_alloc(pud_t *pud, unsigned long addr)
#define pud_offset(pgd, addr) \
(pgd_page_vaddr(*(pgd)) + pud_index(addr))
#define pud_free(pud) free_page(pud)
+static inline pud_t *pud_alloc_one_early(void)
+{
+ pud_t *pud = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pud, 0, PAGE_SIZE);
+ return pud;
+}
static inline pud_t *pud_alloc_one(void)
{
assert(PTRS_PER_PUD * sizeof(pud_t) == PAGE_SIZE);
- pud_t *pud = alloc_page();
+ pud_t *pud = page_alloc_initialized() ? alloc_page() : pud_alloc_one_early();
return pud;
}
static inline pud_t *pud_alloc(pgd_t *pgd, unsigned long addr)
@@ -129,10 +147,16 @@ static inline pud_t *pud_alloc(pgd_t *pgd, unsigned long addr)
(pmd_page_vaddr(*(pmd)) + pte_index(addr))
#define pte_free(pte) free_page(pte)
+static inline pte_t *pte_alloc_one_early(void)
+{
+ pte_t *pte = memalign(PAGE_SIZE, PAGE_SIZE);
+ memset(pte, 0, PAGE_SIZE);
+ return pte;
+}
static inline pte_t *pte_alloc_one(void)
{
assert(PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE);
- pte_t *pte = alloc_page();
+ pte_t *pte = page_alloc_initialized() ? alloc_page() : pte_alloc_one_early();
return pte;
}
static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)