@@ -7,7 +7,7 @@
#include <asm-generic/pgalloc.h>
-/*
+/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
@@ -20,7 +20,8 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
}
static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte,
+ unsigned long vaddr)
{
pmd_set(pmd, pte);
}
@@ -34,7 +34,8 @@
#include <asm-generic/pgalloc.h>
static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte,
+ unsigned long vaddr)
{
/*
* The cast to long below is OK in 32-bit PAE40 regime with long long pte
@@ -57,7 +57,7 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd_k, pte_k);
+ pmd_populate_kernel(&init_mm, pmd_k, pte_k, kvaddr);
return pte_k;
}
@@ -19,7 +19,7 @@ static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel(&init_mm, pmd, pte, addr);
flush_tlb_kernel_range(addr, addr + PMD_SIZE);
return 0;
@@ -122,7 +122,8 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
* Ensure that we always set both PMD entries.
*/
static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep,
+ unsigned long vaddr)
{
/*
* The pmd must be loaded with the physical address of the PTE table
@@ -111,7 +111,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
__func__, addr);
return;
}
- pmd_populate_kernel(&init_mm, pmdp, p);
+ pmd_populate_kernel(&init_mm, pmdp, p, addr);
flush_pmd_entry(pmdp);
}
@@ -384,7 +384,7 @@ void __init early_fixmap_init(void)
!= FIXADDR_TOP >> PMD_SHIFT);
pmd = fixmap_pmd(FIXADDR_TOP);
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte, FIXADDR_TOP);
pte_offset_fixmap = pte_offset_early_fixmap;
}
@@ -124,7 +124,8 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
* of the mm address space.
*/
static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep,
+ unsigned long vaddr)
{
VM_BUG_ON(mm && mm != &init_mm);
__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
@@ -69,7 +69,7 @@ static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
dst_ptep = trans_alloc(info);
if (!dst_ptep)
return -ENOMEM;
- pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
+ pmd_populate_kernel(NULL, dst_pmdp, dst_ptep, addr);
dst_ptep = pte_offset_kernel(dst_pmdp, start);
src_ptep = pte_offset_kernel(src_pmdp, start);
@@ -11,7 +11,7 @@
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
set_pmd(pmd, __pmd(__pa(pte)));
}
@@ -62,7 +62,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
* kernel map of the active thread who's calling pmd_populate_kernel...
*/
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
extern spinlock_t kmap_gen_lock;
pmd_t *ppmd;
@@ -13,7 +13,8 @@
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm,
- pmd_t *pmd, pte_t *pte)
+ pmd_t *pmd, pte_t *pte,
+ unsigned long vaddr)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
@@ -200,7 +200,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate memory\n", __func__);
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel(&init_mm, pmd, pte, addr);
}
return pte_offset_kernel(pmd, addr);
@@ -110,7 +110,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
if (!early)
memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
- pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
+ pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys), addr);
}
return pte_offset_kernel(pmdp, addr);
@@ -30,7 +30,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
-#define pmd_populate_kernel pmd_populate
+#define pmd_populate_kernel(mm, pmd, pte, vaddr) pmd_populate(mm, pmd, pte)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
@@ -79,7 +79,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
}
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte, unsigned long vaddr)
{
pmd_set(pmd, pte);
}
@@ -23,7 +23,8 @@ do { \
tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
} while (0)
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte, unsigned long vaddr)
{
pmd_val(*pmd) = __pa((unsigned long)pte);
}
@@ -35,7 +35,7 @@ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = (unsigned long)page_address(pte))
-#define pmd_populate_kernel(mm, pmd, pte) \
+#define pmd_populate_kernel(mm, pmd, pte, vaddr) \
(pmd_val(*(pmd)) = (unsigned long) (pte))
#endif /* _ASM_MICROBLAZE_PGALLOC_H */
@@ -19,7 +19,7 @@
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
@@ -133,7 +133,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
return NULL;
new_pte = kvm_mmu_memory_cache_alloc(cache);
clear_page(new_pte);
- pmd_populate_kernel(NULL, pmd, new_pte);
+ pmd_populate_kernel(NULL, pmd, new_pte, addr);
}
return pte_offset_kernel(pmd, addr);
}
@@ -15,7 +15,7 @@
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
@@ -25,7 +25,7 @@
extern int mem_init_done;
-#define pmd_populate_kernel(mm, pmd, pte) \
+#define pmd_populate_kernel(mm, pmd, pte, vaddr) \
set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
@@ -61,13 +61,14 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif
static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte, unsigned long vaddr)
{
set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
}
#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ pmd_populate_kernel(mm, pmd, page_address(pte_page), \
+ (unsigned long)page_to_virt(pte_page))
#endif
@@ -390,7 +390,7 @@ static void __ref map_pages(unsigned long start_vaddr,
pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pg_table)
panic("page table allocation failed\n");
- pmd_populate_kernel(NULL, pmd, pg_table);
+ pmd_populate_kernel(NULL, pmd, pg_table, vaddr);
}
pg_table = pte_offset_kernel(pmd, vaddr);
@@ -481,7 +481,7 @@ void free_initmem(void)
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
-
+
free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */
@@ -694,7 +694,7 @@ static void __init fixmap_init(void)
if (!pte)
panic("fixmap: pte allocation failed.\n");
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel(&init_mm, pmd, pte, addr);
addr += PAGE_SIZE;
} while (addr < end);
@@ -26,7 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
/* #define pgd_populate(mm, pmd, pte) BUG() */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}
@@ -156,7 +156,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}
@@ -15,7 +15,7 @@
/* #define pgd_populate(mm, pmd, pte) BUG() */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
if (IS_ENABLED(CONFIG_BOOKE))
*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
@@ -37,7 +37,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
pmd_set(pmd, (unsigned long)pte);
}
@@ -104,7 +104,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
if (!pmd_present(*pmdp)) {
ptep = early_alloc_pgtable(PAGE_SIZE, nid,
region_start, region_end);
- pmd_populate_kernel(&init_mm, pmdp, ptep);
+ pmd_populate_kernel(&init_mm, pmdp, ptep, ea);
}
ptep = pte_offset_kernel(pmdp, ea);
@@ -47,7 +47,7 @@ int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
if (!new)
return -ENOMEM;
kasan_populate_pte(new, PAGE_KERNEL);
- pmd_populate_kernel(&init_mm, pmd, new);
+ pmd_populate_kernel(&init_mm, pmd, new, k_cur);
}
return 0;
}
@@ -187,6 +187,6 @@ void __init kasan_early_init(void)
do {
next = pgd_addr_end(addr, end);
- pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
+ pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte, addr);
} while (pmd++, addr = next, addr != end);
}
@@ -54,7 +54,7 @@ static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgpr
if (kasan_pte_table(*pmdp)) {
ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE);
- pmd_populate_kernel(&init_mm, pmdp, ptep);
+ pmd_populate_kernel(&init_mm, pmdp, ptep, ea);
}
ptep = pte_offset_kernel(pmdp, ea);
@@ -93,9 +93,12 @@ void __init kasan_early_init(void)
__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
&kasan_early_shadow_pte[i], zero_pte, 0);
- for (i = 0; i < PTRS_PER_PMD; i++)
+ addr = KASAN_SHADOW_START;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
- kasan_early_shadow_pte);
+ kasan_early_shadow_pte, addr);
+ addr += PMD_SIZE;
+ }
for (i = 0; i < PTRS_PER_PUD; i++)
pud_populate(&init_mm, &kasan_early_shadow_pud[i],
@@ -55,6 +55,7 @@ void __init kasan_init(void)
phys_addr_t start, end;
u64 i;
pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
+ unsigned long vaddr_start = KASAN_SHADOW_START;
if (!early_radix_enabled()) {
pr_warn("KASAN not enabled as it requires radix!");
@@ -68,9 +69,11 @@ void __init kasan_init(void)
__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
&kasan_early_shadow_pte[i], zero_pte, 0);
- for (i = 0; i < PTRS_PER_PMD; i++)
+ for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
- kasan_early_shadow_pte);
+ kasan_early_shadow_pte,
+ vaddr_start + i * PMD_SIZE);
+ }
for (i = 0; i < PTRS_PER_PUD; i++)
pud_populate(&init_mm, &kasan_early_shadow_pud[i],
@@ -107,7 +107,7 @@ int __ref map_kernel_page(unsigned long ea, phys_addr_t pa, pgprot_t prot)
pmdp = pmd_offset(pudp, ea);
if (!pmd_present(*pmdp)) {
ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
- pmd_populate_kernel(&init_mm, pmdp, ptep);
+ pmd_populate_kernel(&init_mm, pmdp, ptep, ea);
}
ptep = pte_offset_kernel(pmdp, ea);
}
@@ -43,7 +43,7 @@ notrace void __init early_ioremap_init(void)
for (; (s32)(FIXADDR_TOP - addr) > 0;
addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
- pmd_populate_kernel(&init_mm, pmdp, ptep);
+ pmd_populate_kernel(&init_mm, pmdp, ptep, addr);
early_ioremap_setup();
}
@@ -64,7 +64,7 @@ pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
if (pmd_none(*pmdp)) {
pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
- pmd_populate_kernel(&init_mm, pmdp, ptep);
+ pmd_populate_kernel(&init_mm, pmdp, ptep, va);
}
return pte_offset_kernel(pmdp, va);
}
@@ -16,7 +16,7 @@
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm,
- pmd_t *pmd, pte_t *pte)
+ pmd_t *pmd, pte_t *pte, unsigned long vaddr)
{
unsigned long pfn = virt_to_pfn(pte);
@@ -176,7 +176,7 @@ static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long
if (!dst_ptep)
return -ENOMEM;
- pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
+ pmd_populate_kernel(NULL, dst_pmdp, dst_ptep, 0);
}
dst_ptep = pte_offset_kernel(dst_pmdp, start);
@@ -131,7 +131,7 @@ static inline void pmd_populate(struct mm_struct *mm,
set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
}
-#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
+#define pmd_populate_kernel(mm, pmd, pte, vaddr) pmd_populate(mm, pmd, pte)
/*
* page table entry allocation/free routines.
@@ -21,7 +21,7 @@ extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
+ pte_t *pte, unsigned long vaddr)
{
set_pmd(pmd, __pmd((unsigned long)pte));
}
@@ -157,7 +157,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
if (!pte)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel(&init_mm, pmd, pte, 0);
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -53,7 +53,8 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
-#define pmd_populate_kernel pmd_populate
+#define pmd_populate_kernel(mm, pmd, pte, vaddr) \
+ pmd_populate(mm, pmd, pte)
pgtable_t pte_alloc_one(struct mm_struct *mm);
@@ -69,8 +69,8 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pte_free_defer pte_free_defer
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
-#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
+#define pmd_populate_kernel(MM, PMD, PTE, VADDR) pmd_set(MM, PMD, PTE)
+#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
void pgtable_free(void *table, bool is_page);
@@ -5,7 +5,7 @@
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-
+
#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -1843,7 +1843,7 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
if (!new)
goto err_alloc;
alloc_bytes += PAGE_SIZE;
- pmd_populate_kernel(&init_mm, pmd, new);
+ pmd_populate_kernel(&init_mm, pmd, new, vstart);
}
pte = pte_offset_kernel(pmd, vstart);
@@ -2404,11 +2404,11 @@ void __init paging_init(void)
* work.
*/
init_mm.pgd += ((shift) / (sizeof(pgd_t)));
-
+
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
inherit_prom_mappings();
-
+
/* Ok, we can use our TLB miss and window trap handlers safely. */
setup_tba();
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
+/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Copyright 2003 PathScale, Inc.
* Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
@@ -12,7 +12,7 @@
#include <asm-generic/pgalloc.h>
-#define pmd_populate_kernel(mm, pmd, pte) \
+#define pmd_populate_kernel(mm, pmd, pte, vaddr) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
#define pmd_populate(mm, pmd, pte) \
@@ -62,7 +62,8 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
}
static inline void pmd_populate_kernel(struct mm_struct *mm,
- pmd_t *pmd, pte_t *pte)
+ pmd_t *pmd, pte_t *pte,
+ unsigned long vaddr)
{
paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
@@ -73,7 +73,15 @@ static inline void fname##_init(struct mm_struct *mm, \
DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
-DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
+
+static inline void pmd_populate_kernel_init(struct mm_struct *mm,
+ pmd_t *arg1, pte_t *arg2, unsigned long arg3, bool init)
+{
+ if (init)
+ pmd_populate_kernel_safe(mm, arg1, arg2);
+ else
+ pmd_populate_kernel(mm, arg1, arg2, arg3);
+}
#define DEFINE_ENTRY(type1, type2, init) \
static inline void set_##type1##_init(type1##_t *arg1, \
@@ -286,7 +294,7 @@ static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
if (pmd_none(*pmd)) {
pte_t *pte = (pte_t *) spp_getpage();
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel(&init_mm, pmd, pte, vaddr);
if (pte != pte_offset_kernel(pmd, 0))
printk(KERN_ERR "PAGETABLE BUG #03!\n");
}
@@ -575,7 +583,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
spin_lock(&init_mm.page_table_lock);
- pmd_populate_kernel_init(&init_mm, pmd, pte, init);
+ pmd_populate_kernel_init(&init_mm, pmd, pte, (unsigned long)__va(paddr), init);
spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
@@ -888,7 +888,7 @@ void __init early_ioremap_init(void)
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
memset(bm_pte, 0, sizeof(bm_pte));
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte, fix_to_virt(FIX_BTMAP_BEGIN));
/*
* The boot-ioremap range spans multiple pmds, for which
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
}
p = early_alloc(PAGE_SIZE, nid, true);
- pmd_populate_kernel(&init_mm, pmd, p);
+ pmd_populate_kernel(&init_mm, pmd, p, addr);
}
pte = pte_offset_kernel(pmd, addr);
@@ -21,7 +21,7 @@
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_populate_kernel(mm, pmdp, ptep) \
+#define pmd_populate_kernel(mm, pmdp, ptep, vaddr) \
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
@@ -2802,7 +2802,7 @@ static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
-int __pte_alloc_kernel(pmd_t *pmd);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long vaddr);
#if defined(CONFIG_MMU)
@@ -2997,7 +2997,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
- ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
NULL: pte_offset_kernel(pmd, address))
#if USE_SPLIT_PMD_PTLOCKS
@@ -58,7 +58,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
if (!pgtable)
return -ENOMEM;
- pmd_populate_kernel(&init_mm, &__pmd, pgtable);
+ pmd_populate_kernel(&init_mm, &__pmd, pgtable, start);
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
pte_t entry, *pte;
@@ -81,7 +81,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
/* Make pte visible before pmd. See comment in pmd_install(). */
smp_wmb();
- pmd_populate_kernel(&init_mm, pmd, pgtable);
+ pmd_populate_kernel(&init_mm, pmd, pgtable, start);
if (!(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH))
flush_tlb_kernel_range(start, start + PMD_SIZE);
} else {
@@ -117,7 +117,8 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ lm_alias(kasan_early_shadow_pte),
+ addr);
continue;
}
@@ -131,7 +132,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
if (!p)
return -ENOMEM;
- pmd_populate_kernel(&init_mm, pmd, p);
+ pmd_populate_kernel(&init_mm, pmd, p, addr);
}
zero_pte_populate(pmd, addr, next);
} while (pmd++, addr = next, addr != end);
@@ -158,7 +159,8 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ lm_alias(kasan_early_shadow_pte),
+ addr);
continue;
}
@@ -204,7 +206,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ lm_alias(kasan_early_shadow_pte),
+ addr);
continue;
}
@@ -267,7 +270,8 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd,
- lm_alias(kasan_early_shadow_pte));
+ lm_alias(kasan_early_shadow_pte),
+ addr);
continue;
}
@@ -447,7 +447,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
return 0;
}
-int __pte_alloc_kernel(pmd_t *pmd)
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long vaddr)
{
pte_t *new = pte_alloc_one_kernel(&init_mm);
if (!new)
@@ -456,7 +456,7 @@ int __pte_alloc_kernel(pmd_t *pmd)
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
smp_wmb(); /* See comment in pmd_install() */
- pmd_populate_kernel(&init_mm, pmd, new);
+ pmd_populate_kernel(&init_mm, pmd, new, vaddr);
new = NULL;
}
spin_unlock(&init_mm.page_table_lock);
@@ -3238,7 +3238,7 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
if (!new)
goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, new);
+ pmd_populate_kernel(&init_mm, pmd, new, addr);
}
return;
@@ -45,7 +45,8 @@ static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
#define pte_alloc_kernel_track(pmd, address, mask) \
((unlikely(pmd_none(*(pmd))) && \
- (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
+ (__pte_alloc_kernel(pmd, address) || \
+ ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
NULL: pte_offset_kernel(pmd, address))
#endif /* _LINUX_PGALLOC_TRACK_H */
@@ -191,7 +191,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (!p)
return NULL;
- pmd_populate_kernel(&init_mm, pmd, p);
+ pmd_populate_kernel(&init_mm, pmd, p, addr);
}
return pmd;
}
This patch affords each architecture the ability to condition the population of
page middle directory (PMD) entries on the virtual address being mapped,
matching the existing PTE infrastructure, and avoids the need to perform a
reverse page table walk in cases where the population context is not readily
available, e.g. dynamic vmalloc calls on arm64.

To achieve this, it adds a virtual address argument to every implementation and
call site of pmd_populate_kernel() across architectures, so the new interface
is adopted uniformly across the tree.

Signed-off-by: Maxwell Bland <mbland@motorola.com>
---
Hi all,

Thank you for taking the time to review this change. It affects many
subarchitectures, so the maintainers list is large. Apologies in advance if
there is a specific maintainer I should have contacted directly about
coordinating this across subprojects.

The motivation for such a sweeping change is explained at
lore.kernel.org/all/cf5409c3-254a-459b-8969-429db2ec6439@redhat.com

It is also my understanding that some subarchitectures maintain separate
"next" or development branches ahead of mainline Linux. Please let me know if
a cherry-pick onto such a branch is desired and I will do my best to provide
one.

 arch/alpha/include/asm/pgalloc.h | 5 +++--
 arch/arc/include/asm/pgalloc.h | 3 ++-
 arch/arc/mm/highmem.c | 2 +-
 arch/arm/include/asm/kfence.h | 2 +-
 arch/arm/include/asm/pgalloc.h | 3 ++-
 arch/arm/mm/kasan_init.c | 2 +-
 arch/arm/mm/mmu.c | 2 +-
 arch/arm64/include/asm/pgalloc.h | 3 ++-
 arch/arm64/mm/trans_pgd.c | 2 +-
 arch/csky/include/asm/pgalloc.h | 2 +-
 arch/hexagon/include/asm/pgalloc.h | 2 +-
 arch/loongarch/include/asm/pgalloc.h | 3 ++-
 arch/loongarch/mm/init.c | 2 +-
 arch/loongarch/mm/kasan_init.c | 2 +-
 arch/m68k/include/asm/mcf_pgalloc.h | 2 +-
 arch/m68k/include/asm/motorola_pgalloc.h | 3 ++-
 arch/m68k/include/asm/sun3_pgalloc.h | 3 ++-
 arch/microblaze/include/asm/pgalloc.h | 2 +-
 arch/mips/include/asm/pgalloc.h | 2 +-
 arch/mips/kvm/mmu.c | 2 +-
 arch/nios2/include/asm/pgalloc.h | 2 +-
 arch/openrisc/include/asm/pgalloc.h | 2 +-
 arch/parisc/include/asm/pgalloc.h | 5 +++--
 arch/parisc/mm/init.c | 6 +++---
 arch/powerpc/include/asm/book3s/32/pgalloc.h | 2 +-
 arch/powerpc/include/asm/book3s/64/pgalloc.h | 2 +-
 arch/powerpc/include/asm/nohash/32/pgalloc.h | 2 +-
 arch/powerpc/include/asm/nohash/64/pgalloc.h | 2 +-
 arch/powerpc/mm/book3s64/radix_pgtable.c | 2 +-
 arch/powerpc/mm/kasan/init_32.c | 4 ++--
 arch/powerpc/mm/kasan/init_book3e_64.c | 9 ++++++---
 arch/powerpc/mm/kasan/init_book3s_64.c | 7 +++++--
 arch/powerpc/mm/nohash/book3e_pgtable.c | 2 +-
 arch/powerpc/mm/pgtable_32.c | 4 ++--
 arch/riscv/include/asm/pgalloc.h | 2 +-
 arch/riscv/kernel/hibernate.c | 2 +-
 arch/s390/include/asm/pgalloc.h | 2 +-
 arch/sh/include/asm/pgalloc.h | 2 +-
 arch/sh/mm/init.c | 2 +-
 arch/sparc/include/asm/pgalloc_32.h | 3 ++-
 arch/sparc/include/asm/pgalloc_64.h | 4 ++--
 arch/sparc/mm/init_64.c | 8 ++++----
 arch/um/include/asm/pgalloc.h | 4 ++--
 arch/x86/include/asm/pgalloc.h | 3 ++-
 arch/x86/mm/init_64.c | 14 +++++++++++---
 arch/x86/mm/ioremap.c | 2 +-
 arch/x86/mm/kasan_init_64.c | 2 +-
 arch/xtensa/include/asm/pgalloc.h | 2 +-
 include/linux/mm.h | 4 ++--
 mm/hugetlb_vmemmap.c | 4 ++--
 mm/kasan/init.c | 14 +++++++++-----
 mm/memory.c | 4 ++--
 mm/percpu.c | 2 +-
 mm/pgalloc-track.h | 3 ++-
 mm/sparse-vmemmap.c | 2 +-
 55 files changed, 107 insertions(+), 78 deletions(-)
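
For illustration of the intended use (not part of this patch): with the
virtual address available at population time, an architecture can make a
per-region decision directly, without walking back up the page tables. The
sketch below follows the shape of the arm64 helper touched above;
in_protected_vmalloc_range() is a hypothetical predicate invented for this
example, and the PXN handling is an assumption about one possible consumer,
not code proposed by this series.

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep,
		    unsigned long vaddr)
{
	pmdval_t prot = PMD_TYPE_TABLE | PMD_TABLE_UXN;

	VM_BUG_ON(mm && mm != &init_mm);

	/*
	 * Hypothetical policy hook: the decision is keyed off the virtual
	 * address being mapped, which the caller now passes in.
	 */
	if (in_protected_vmalloc_range(vaddr))
		prot |= PMD_TABLE_PXN;

	__pmd_populate(pmdp, __pa(ptep), prot);
}

Because the address arrives as an argument, no reverse lookup from pmdp to a
virtual address is required, which is the point of the interface change.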