--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -83,11 +83,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
}
#define pmd_pgtable(pmd) pmd_page(pmd)
+typedef phys_addr_t (*pgtable_alloc)(unsigned long shift, void *data);
+
extern void __create_pgd_mapping_extend(pgd_t *pgdir,
unsigned int entries_cnt, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ pgtable_alloc allocator,
+ void *info,
int flags);
#endif
--- a/arch/arm64/mm/idmap_mmu.c
+++ b/arch/arm64/mm/idmap_mmu.c
@@ -27,11 +27,12 @@ void __create_pgd_mapping_extend(pgd_t *pgdir,
unsigned long virt,
phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ pgtable_alloc allocator,
+ void *info,
int flags)
{
__create_pgd_mapping(pgdir, entries_cnt, phys, virt, size, prot,
- pgtable_alloc, flags);
+ allocator, info, flags);
}
#endif
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -86,7 +86,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static phys_addr_t __init early_pgtable_alloc(int shift)
+static phys_addr_t __init early_pgtable_alloc(unsigned long unused_a, void *unused_b)
{
phys_addr_t phys;
void *ptr;
@@ -113,7 +113,7 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
return phys;
}
-static phys_addr_t __pgd_pgtable_alloc(int shift)
+static phys_addr_t __pgd_pgtable_alloc(unsigned long unused_a, void *unused_b)
{
void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
BUG_ON(!ptr);
@@ -123,9 +123,9 @@ static phys_addr_t __pgd_pgtable_alloc(int shift)
return __pa(ptr);
}
-static phys_addr_t pgd_pgtable_alloc(int shift)
+static phys_addr_t pgd_pgtable_alloc(unsigned long shift, void *unused)
{
- phys_addr_t pa = __pgd_pgtable_alloc(shift);
+ phys_addr_t pa = __pgd_pgtable_alloc(shift, unused);
/*
* Call proper page table ctor in case later we need to
@@ -154,7 +154,8 @@ int idmap_extend_pgtable;
void create_idmap(pgd_t *pgdir, phys_addr_t phys,
phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ pgtable_alloc allocator,
+ void *info,
int flags)
{
u64 ptrs_per_pgd = idmap_ptrs_per_pgd;
@@ -162,13 +163,13 @@ void create_idmap(pgd_t *pgdir, phys_addr_t phys,
#ifdef CONFIG_IDMAP_PGTABLE_EXPAND
if (idmap_extend_pgtable)
__create_pgd_mapping_extend(pgdir, ptrs_per_pgd,
- phys, phys, size, prot, pgtable_alloc, flags);
+ phys, phys, size, prot, allocator, info, flags);
else
__create_pgd_mapping(pgdir, ptrs_per_pgd,
- phys, phys, size, prot, pgtable_alloc, flags);
+ phys, phys, size, prot, allocator, info, flags);
#else
__create_pgd_mapping(pgdir, ptrs_per_pgd,
- phys, phys, size, prot, pgtable_alloc, flags);
+ phys, phys, size, prot, allocator, info, flags);
#endif
}
@@ -186,7 +187,7 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
return;
}
__create_pgd_mapping(init_mm.pgd, PTRS_PER_PGD, phys, virt, size, prot, NULL,
- NO_CONT_MAPPINGS);
+ NULL, NO_CONT_MAPPINGS);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
@@ -201,7 +202,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(mm->pgd, PTRS_PER_PGD, phys, virt, size, prot,
- pgd_pgtable_alloc, flags);
+ pgd_pgtable_alloc, NULL, flags);
}
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
@@ -214,7 +215,7 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
}
__create_pgd_mapping(init_mm.pgd, PTRS_PER_PGD, phys, virt, size, prot, NULL,
- NO_CONT_MAPPINGS);
+ NULL, NO_CONT_MAPPINGS);
/* flush the TLBs after updating live kernel mappings */
flush_tlb_kernel_range(virt, virt + size);
@@ -224,7 +225,7 @@ static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
phys_addr_t end, pgprot_t prot, int flags)
{
__create_pgd_mapping(pgdp, PTRS_PER_PGD, start, __phys_to_virt(start), end - start,
- prot, early_pgtable_alloc, flags);
+ prot, early_pgtable_alloc, NULL, flags);
}
void __init mark_linear_text_alias_ro(void)
@@ -325,7 +326,7 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(size));
__create_pgd_mapping(pgdp, PTRS_PER_PGD, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, flags);
+ early_pgtable_alloc, NULL, flags);
if (!(vm_flags & VM_NO_GUARD))
size += PAGE_SIZE;
@@ -369,7 +370,7 @@ static int __init map_entry_trampoline(void)
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
__create_pgd_mapping(tramp_pg_dir, PTRS_PER_PGD, pa_start, TRAMP_VALIAS, PAGE_SIZE,
- prot, __pgd_pgtable_alloc, 0);
+ prot, __pgd_pgtable_alloc, NULL, 0);
/* Map both the text and data into the kernel page table */
__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
@@ -1261,7 +1262,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, PTRS_PER_PGD, start, __phys_to_virt(start),
- size, params->pgprot, __pgd_pgtable_alloc,
+ size, params->pgprot, __pgd_pgtable_alloc, NULL,
flags);
memblock_clear_nomap(start, size);
--- a/arch/arm64/mm/mmu_include.c
+++ b/arch/arm64/mm/mmu_include.c
@@ -69,7 +69,8 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ pgtable_alloc allocator,
+ void *info,
int flags)
{
unsigned long next;
@@ -79,8 +80,8 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
if (pmd_none(pmd)) {
phys_addr_t pte_phys;
- BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc(PAGE_SHIFT);
+ BUG_ON(!allocator);
+ pte_phys = allocator(PAGE_SHIFT, info);
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
pmd = READ_ONCE(*pmdp);
}
@@ -104,7 +105,9 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int), int flags)
+ pgtable_alloc allocator,
+ void *info,
+ int flags)
{
unsigned long next;
pmd_t *pmdp;
@@ -128,7 +131,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
READ_ONCE(pmd_val(*pmdp))));
} else {
alloc_init_cont_pte(pmdp, addr, next, phys, prot,
- pgtable_alloc, flags);
+ allocator, info, flags);
BUG_ON(pmd_val(old_pmd) != 0 &&
pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
@@ -142,7 +145,9 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int), int flags)
+ pgtable_alloc allocator,
+ void *info,
+ int flags)
{
unsigned long next;
pud_t pud = READ_ONCE(*pudp);
@@ -154,8 +159,8 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
if (pud_none(pud)) {
phys_addr_t pmd_phys;
- BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc(PMD_SHIFT);
+ BUG_ON(!allocator);
+ pmd_phys = allocator(PMD_SHIFT, info);
__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
pud = READ_ONCE(*pudp);
}
@@ -171,7 +176,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
(flags & NO_CONT_MAPPINGS) == 0)
__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
- init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
+ init_pmd(pudp, addr, next, phys, __prot, allocator, info, flags);
phys += next - addr;
} while (addr = next, addr != end);
@@ -191,7 +196,8 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ pgtable_alloc allocator,
+ void *info,
int flags)
{
unsigned long next;
@@ -202,8 +208,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
if (p4d_none(p4d)) {
phys_addr_t pud_phys;
- BUG_ON(!pgtable_alloc);
- pud_phys = pgtable_alloc(PUD_SHIFT);
+ BUG_ON(!allocator);
+ pud_phys = allocator(PUD_SHIFT, info);
__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
p4d = READ_ONCE(*p4dp);
}
@@ -230,7 +236,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
READ_ONCE(pud_val(*pudp))));
} else {
alloc_init_cont_pmd(pudp, addr, next, phys, prot,
- pgtable_alloc, flags);
+ allocator, info, flags);
BUG_ON(pud_val(old_pud) != 0 &&
pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
@@ -244,7 +250,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
static void __create_pgd_mapping(pgd_t *pgdir, unsigned int entries_cnt, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(int),
+ pgtable_alloc allocator,
+ void *info,
int flags)
{
unsigned long addr, end, next;
@@ -268,8 +275,8 @@ static void __create_pgd_mapping(pgd_t *pgdir, unsigned int entries_cnt, phys_ad
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
- flags);
+ alloc_init_pud(pgdp, addr, next, phys, prot, allocator,
+ info, flags);
phys += next - addr;
} while (pgdp++, addr = next, addr != end);
}
The current pgtable allocator only takes the page-table level (shift) as a
parameter, and the allocator function itself determines which memory pool to
allocate from. The incoming pgtable allocator needs an extra parameter to
receive local pool information, which directs the allocation. Prepare the
prototype for that incoming change.

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Brown <broonie@kernel.org>
To: linux-arm-kernel@lists.infradead.org
---
 arch/arm64/include/asm/pgalloc.h |  5 +++-
 arch/arm64/mm/idmap_mmu.c        |  5 ++--
 arch/arm64/mm/mmu.c              | 31 +++++++++++++------------
 arch/arm64/mm/mmu_include.c      | 39 +++++++++++++++++++-------------
 4 files changed, 46 insertions(+), 34 deletions(-)
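For reference, a minimal sketch of how a later allocator might consume the new
info argument. Everything below is illustrative only and not part of this
patch: struct pgtable_pool, its fields, and pool_pgtable_alloc() are
hypothetical names.

/*
 * Illustration only, not part of this patch: an allocator that hands out
 * pgtable pages from a caller-supplied physical range passed through the
 * opaque info pointer.  The shift argument is unused here.
 */
struct pgtable_pool {
	phys_addr_t next;	/* next free page in the pool */
	phys_addr_t end;	/* first byte past the pool */
};

static phys_addr_t pool_pgtable_alloc(unsigned long shift, void *data)
{
	struct pgtable_pool *pool = data;
	phys_addr_t pa = pool->next;

	BUG_ON(pa + PAGE_SIZE > pool->end);
	pool->next += PAGE_SIZE;
	memset(__va(pa), 0, PAGE_SIZE);

	return pa;
}

A caller would then pass the pool descriptor alongside the callback, e.g.:

	__create_pgd_mapping(pgdir, PTRS_PER_PGD, phys, virt, size, prot,
			     pool_pgtable_alloc, &pool, flags);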