@@ -93,4 +93,17 @@ extern void __create_pgd_mapping_extend(pgd_t *pgdir,
void *info,
int flags);
+extern int idmap_extend_pgtable;
+
+extern void create_idmap(pgd_t *pgdir, phys_addr_t phys,
+ phys_addr_t size,
+ pgprot_t prot,
+ pgtable_alloc allocator,
+ void *info,
+ int flags);
+
+#define NO_BLOCK_MAPPINGS BIT(0)
+#define NO_CONT_MAPPINGS BIT(1)
+#define NO_FIXMAP BIT(2)
+
#endif
@@ -5,10 +5,6 @@
* and MMU-enable
*/
-#define NO_BLOCK_MAPPINGS BIT(0)
-#define NO_CONT_MAPPINGS BIT(1)
-#define NO_FIXMAP BIT(2)
-
static bool pgattr_change_is_safe(u64 old, u64 new)
{
/*
@@ -274,6 +274,14 @@ int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
return 0;
}
+static phys_addr_t allocator_trans_alloc(unsigned long unused, void *info)
+{
+ unsigned long *p;
+
+ p = trans_alloc(info);
+ return virt_to_phys(p);
+}
+
/*
* The page we want to idmap may be outside the range covered by VA_BITS that
* can be built using the kernel's p?d_populate() helpers. As a one off, for a
@@ -287,38 +295,28 @@ int trans_pgd_map_page(struct trans_pgd_info *info, pgd_t *trans_pgd,
int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
unsigned long *t0sz, void *page)
{
-	phys_addr_t dst_addr = virt_to_phys(page);
-	unsigned long pfn = __phys_to_pfn(dst_addr);
-	int max_msb = (dst_addr & GENMASK(52, 48)) ? 51 : 47;
-	int bits_mapped = PAGE_SHIFT - 4;
-	unsigned long level_mask, prev_level_entry, *levels[4];
-	int this_level, index, level_lsb, level_msb;
-
-	dst_addr &= PAGE_MASK;
-	prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));
-
-	for (this_level = 3; this_level >= 0; this_level--) {
-		levels[this_level] = trans_alloc(info);
-		if (!levels[this_level])
-			return -ENOMEM;
-
-		level_lsb = ARM64_HW_PGTABLE_LEVEL_SHIFT(this_level);
-		level_msb = min(level_lsb + bits_mapped, max_msb);
-		level_mask = GENMASK_ULL(level_msb, level_lsb);
-
-		index = (dst_addr & level_mask) >> level_lsb;
-		*(levels[this_level] + index) = prev_level_entry;
-
-		pfn = virt_to_pfn(levels[this_level]);
-		prev_level_entry = pte_val(pfn_pte(pfn,
-			__pgprot(PMD_TYPE_TABLE)));
-
-		if (level_msb == max_msb)
-			break;
-	}
-
-	*trans_ttbr0 = phys_to_ttbr(__pfn_to_phys(pfn));
-	*t0sz = TCR_T0SZ(max_msb + 1);
+	pgd_t *pgdir = trans_alloc(info);
+	unsigned long base, step, level, va_bits;
+	int flags = NO_FIXMAP;
+
+#ifdef CONFIG_ARM64_64K_PAGES
+	base = 16;
+	step = 13;
+#elif defined(CONFIG_ARM64_4K_PAGES)
+	base = 12;
+	step = 9;
+#elif defined(CONFIG_ARM64_16K_PAGES)
+	base = 14;
+	step = 11;
+#endif
+	create_idmap(pgdir, virt_to_phys(page), PAGE_SIZE, PAGE_KERNEL_EXEC,
+			allocator_trans_alloc, info, flags);
+
+	*trans_ttbr0 = phys_to_ttbr(__virt_to_phys(pgdir));
+	level = CONFIG_PGTABLE_LEVELS + (idmap_extend_pgtable ? 1 : 0);
+	va_bits = base + step * level;
+	va_bits = min(va_bits, vabits_actual);
+	*t0sz = 64 - va_bits;
	return 0;
}
At present, trans_pgd_idmap_page() has its own logic to set up the idmap. To
share the common code, port it onto create_idmap().

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Brown <broonie@kernel.org>
To: linux-arm-kernel@lists.infradead.org
---
 arch/arm64/include/asm/pgalloc.h | 13 +++++++
 arch/arm64/mm/mmu_include.c      |  4 ---
 arch/arm64/mm/trans_pgd.c        | 62 ++++++++++++++++----------------
 3 files changed, 43 insertions(+), 36 deletions(-)