Message ID | 20220613144550.3760857-20-ardb@kernel.org
---|---
State | New, archived
Series | arm64: refactor boot flow and add support for WXN
On Mon, Jun 13, 2022 at 04:45:43PM +0200, Ard Biesheuvel wrote:
> The early KASLR init code runs extremely early, and anything that could
> be deferred until later should be. So let's defer the randomization of
> the module region until much later - this also simplifies the
> arithmetic, given that we no longer have to reason about the link time
> vs load time placement of the core kernel explicitly. Also get rid of
> the global status variable, and infer the status reported by the
> diagnostic print from other KASLR related context.
>
> While at it, get rid of the special case for KASAN without
> KASAN_VMALLOC, which never occurs in practice.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  arch/arm64/kernel/kaslr.c | 95 +++++++++-----------
>  1 file changed, 40 insertions(+), 55 deletions(-)

[...]

> @@ -163,33 +169,12 @@ u64 __init kaslr_early_init(void)
>  		 * when ARM64_MODULE_PLTS is enabled.
>  		 */
>  		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
> -		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
>  	}
>
>  	/* use the lower 21 bits to randomize the base of the module region */
>  	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
>  	module_alloc_base &= PAGE_MASK;
>
> -	return offset;
> -}
> -
> -static int __init kaslr_init(void)
> -{
> -	switch (kaslr_status) {
> -	case KASLR_ENABLED:
> -		pr_info("KASLR enabled\n");
> -		break;
> -	case KASLR_DISABLED_CMDLINE:
> -		pr_info("KASLR disabled on command line\n");
> -		break;
> -	case KASLR_DISABLED_NO_SEED:
> -		pr_warn("KASLR disabled due to lack of seed\n");
> -		break;
> -	case KASLR_DISABLED_FDT_REMAP:
> -		pr_warn("KASLR disabled due to FDT remapping failure\n");
> -		break;
> -	}
> -
>  	return 0;
>  }
> -core_initcall(kaslr_init)
> +late_initcall(kaslr_init)

Are you sure this isn't too late? I'm nervous that we might have called
request_module() off the back of all the other initcalls that we've run by
this point.

Will
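Will's worry is that module loading can already be triggered from earlier initcall levels, before a late_initcall() gets a chance to run. A hypothetical built-in driver (the names below are made up for illustration, not taken from this thread) shows how such a call chain could reach module_alloc() early:

```c
/*
 * Hypothetical example of the concern: device_initcall() runs before
 * late_initcall(), so a built-in driver like this could call
 * request_module() -- and therefore end up in module_alloc() -- before a
 * late_initcall()-based kaslr_init() has randomized module_alloc_base.
 * foo_init and "foo-helper" are made-up names used purely for illustration.
 */
#include <linux/init.h>
#include <linux/kmod.h>

static int __init foo_init(void)
{
	/* Runs at device_initcall time, i.e. before any late_initcall(). */
	return request_module("foo-helper");
}
device_initcall(foo_init)
```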
On Fri, 24 Jun 2022 at 15:08, Will Deacon <will@kernel.org> wrote:
>
> On Mon, Jun 13, 2022 at 04:45:43PM +0200, Ard Biesheuvel wrote:
> > The early KASLR init code runs extremely early, and anything that could
> > be deferred until later should be. So let's defer the randomization of
> > the module region until much later - this also simplifies the
> > arithmetic, given that we no longer have to reason about the link time
> > vs load time placement of the core kernel explicitly. Also get rid of
> > the global status variable, and infer the status reported by the
> > diagnostic print from other KASLR related context.
> >
> > While at it, get rid of the special case for KASAN without
> > KASAN_VMALLOC, which never occurs in practice.
> >
> > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> > ---
> >  arch/arm64/kernel/kaslr.c | 95 +++++++++-----------
> >  1 file changed, 40 insertions(+), 55 deletions(-)
>
> [...]
>
> > @@ -163,33 +169,12 @@ u64 __init kaslr_early_init(void)
> >  		 * when ARM64_MODULE_PLTS is enabled.
> >  		 */
> >  		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
> > -		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
> >  	}
> >
> >  	/* use the lower 21 bits to randomize the base of the module region */
> >  	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
> >  	module_alloc_base &= PAGE_MASK;
> >
> > -	return offset;
> > -}
> > -
> > -static int __init kaslr_init(void)
> > -{
> > -	switch (kaslr_status) {
> > -	case KASLR_ENABLED:
> > -		pr_info("KASLR enabled\n");
> > -		break;
> > -	case KASLR_DISABLED_CMDLINE:
> > -		pr_info("KASLR disabled on command line\n");
> > -		break;
> > -	case KASLR_DISABLED_NO_SEED:
> > -		pr_warn("KASLR disabled due to lack of seed\n");
> > -		break;
> > -	case KASLR_DISABLED_FDT_REMAP:
> > -		pr_warn("KASLR disabled due to FDT remapping failure\n");
> > -		break;
> > -	}
> > -
> >  	return 0;
> >  }
> > -core_initcall(kaslr_init)
> > +late_initcall(kaslr_init)
>
> Are you sure this isn't too late? I'm nervous that we might have called
> request_module() off the back of all the other initcalls that we've run by
> this point.
>

Yeah, I just realized the other day that this is probably too late.
subsys_initcall() might be more suitable here
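For reference, the ordering at issue comes from the fixed initcall levels in include/linux/init.h: pure, core, postcore, arch, subsys, fs, rootfs, device, late. A minimal sketch of the adjustment suggested above (subsys_initcall() instead of late_initcall()); this only illustrates the registration change, it is not the final patch:

```c
/*
 * Sketch only: registering kaslr_init() at subsys_initcall level keeps it
 * ahead of the fs/device/late initcalls that may already call
 * request_module(), while still deferring it out of the early boot path.
 * The function body is elided; it would be the module-region randomization
 * added by the patch above.
 */
static int __init kaslr_init(void)
{
	/* ... randomize module_alloc_base as in the patch ... */
	return 0;
}
subsys_initcall(kaslr_init)
```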
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index d5542666182f..af9ffe4d0f0f 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -20,14 +20,6 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 
-enum kaslr_status {
-	KASLR_ENABLED,
-	KASLR_DISABLED_CMDLINE,
-	KASLR_DISABLED_NO_SEED,
-	KASLR_DISABLED_FDT_REMAP,
-};
-
-static enum kaslr_status __initdata kaslr_status;
 u64 __ro_after_init module_alloc_base;
 u16 __initdata memstart_offset_seed;
 
@@ -63,15 +55,9 @@ struct arm64_ftr_override kaslr_feature_override __initdata;
 u64 __init kaslr_early_init(void)
 {
 	void *fdt;
-	u64 seed, offset, mask, module_range;
+	u64 seed, offset, mask;
 	unsigned long raw;
 
-	/*
-	 * Set a reasonable default for module_alloc_base in case
-	 * we end up running with module randomization disabled.
-	 */
-	module_alloc_base = (u64)_etext - MODULES_VSIZE;
-
 	/*
 	 * Try to map the FDT early. If this fails, we simply bail,
 	 * and proceed with KASLR disabled. We will make another
@@ -79,7 +65,6 @@ u64 __init kaslr_early_init(void)
 	 */
 	fdt = get_early_fdt_ptr();
 	if (!fdt) {
-		kaslr_status = KASLR_DISABLED_FDT_REMAP;
 		return 0;
 	}
 
@@ -93,7 +78,6 @@ u64 __init kaslr_early_init(void)
 	 * return 0 if that is the case.
 	 */
 	if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
-		kaslr_status = KASLR_DISABLED_CMDLINE;
 		return 0;
 	}
 
@@ -106,7 +90,6 @@ u64 __init kaslr_early_init(void)
 		seed ^= raw;
 
 	if (!seed) {
-		kaslr_status = KASLR_DISABLED_NO_SEED;
 		return 0;
 	}
 
@@ -126,19 +109,43 @@ u64 __init kaslr_early_init(void)
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
 
-	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
-	    (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-	     IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
-		/*
-		 * KASAN without KASAN_VMALLOC does not expect the module region
-		 * to intersect the vmalloc region, since shadow memory is
-		 * allocated for each module at load time, whereas the vmalloc
-		 * region is shadowed by KASAN zero pages. So keep modules
-		 * out of the vmalloc region if KASAN is enabled without
-		 * KASAN_VMALLOC, and put the kernel well within 4 GB of the
-		 * module region.
-		 */
-		return offset % SZ_2G;
+	return offset;
+}
+
+static int __init kaslr_init(void)
+{
+	u64 module_range;
+	u32 seed;
+
+	/*
+	 * Set a reasonable default for module_alloc_base in case
+	 * we end up running with module randomization disabled.
+	 */
+	module_alloc_base = (u64)_etext - MODULES_VSIZE;
+
+	if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
+		pr_info("KASLR disabled on command line\n");
+		return 0;
+	}
+
+	if (!kaslr_offset()) {
+		pr_warn("KASLR disabled due to lack of seed\n");
+		return 0;
+	}
+
+	pr_info("KASLR enabled\n");
+
+	/*
+	 * KASAN without KASAN_VMALLOC does not expect the module region to
+	 * intersect the vmalloc region, since shadow memory is allocated for
+	 * each module at load time, whereas the vmalloc region will already be
+	 * shadowed by KASAN zero pages.
+	 */
+	BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+		      IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
+		     !IS_ENABLED(CONFIG_KASAN_VMALLOC));
+
+	seed = get_random_u32();
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
 		/*
@@ -150,8 +157,7 @@ u64 __init kaslr_early_init(void)
 		 * resolved normally.)
 		 */
 		module_range = SZ_2G - (u64)(_end - _stext);
-		module_alloc_base = max((u64)_end + offset - SZ_2G,
-					(u64)MODULES_VADDR);
+		module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
 	} else {
 		/*
 		 * Randomize the module region by setting module_alloc_base to
 		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
 		 * _stext) . This guarantees that the resulting region still
 		 * covers [_stext, _etext], and that all relative branches can
 		 * be resolved without veneers unless this region is exhausted
 		 * and we fall back to a larger 2GB window in module_alloc()
 		 * when ARM64_MODULE_PLTS is enabled.
 		 */
 		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
-		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
 	}
 
 	/* use the lower 21 bits to randomize the base of the module region */
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;
 
-	return offset;
-}
-
-static int __init kaslr_init(void)
-{
-	switch (kaslr_status) {
-	case KASLR_ENABLED:
-		pr_info("KASLR enabled\n");
-		break;
-	case KASLR_DISABLED_CMDLINE:
-		pr_info("KASLR disabled on command line\n");
-		break;
-	case KASLR_DISABLED_NO_SEED:
-		pr_warn("KASLR disabled due to lack of seed\n");
-		break;
-	case KASLR_DISABLED_FDT_REMAP:
-		pr_warn("KASLR disabled due to FDT remapping failure\n");
-		break;
-	}
-
 	return 0;
 }
-core_initcall(kaslr_init)
+late_initcall(kaslr_init)
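The arithmetic that kaslr_init() now applies is unchanged from the early-boot version: the low 21 bits of the seed select an offset within module_range, which is added to the default base and then page-aligned. A standalone sketch of that computation follows; the base, range, seed, and page size values are hypothetical, chosen only to make the snippet runnable, and are not kernel constants.

```c
/*
 * Standalone illustration of the module-base randomization arithmetic:
 * (module_range * low_21_bits(seed)) >> 21 yields an offset in
 * [0, module_range), which is added to the base and page-aligned.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12				/* assuming 4 KiB pages */
#define PAGE_MASK	(~((1ULL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t module_alloc_base = 0xffff800008000000ULL;	/* hypothetical default base */
	uint64_t module_range = 128ULL << 20;			/* hypothetical 128 MiB window */
	uint32_t seed = 0x1234abcd;				/* hypothetical random seed */

	/* use the lower 21 bits to randomize the base of the module region */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	printf("module_alloc_base = 0x%016llx\n",
	       (unsigned long long)module_alloc_base);
	return 0;
}
```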
The early KASLR init code runs extremely early, and anything that could
be deferred until later should be. So let's defer the randomization of
the module region until much later - this also simplifies the
arithmetic, given that we no longer have to reason about the link time
vs load time placement of the core kernel explicitly. Also get rid of
the global status variable, and infer the status reported by the
diagnostic print from other KASLR related context.

While at it, get rid of the special case for KASAN without
KASAN_VMALLOC, which never occurs in practice.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/kaslr.c | 95 +++++++++-----------
 1 file changed, 40 insertions(+), 55 deletions(-)