
[v7,4/4] arm64: Use a variable to store non-global mappings decision

Message ID 20191106130052.10642-5-broonie@kernel.org (mailing list archive)
State New, archived
Series E0PD support

Commit Message

Mark Brown Nov. 6, 2019, 1 p.m. UTC
Refactor the code which checks whether we need to use non-global
mappings so that it reads a variable instead of checking the CPU
capabilities each time, and do the initial check for KPTI early in
boot, before we start allocating memory, so that we still avoid
transitioning to non-global mappings in the common cases.

Since this variable always matches our decision about non-global
mappings, we can also combine arm64_kernel_use_ng_mappings() and
arm64_kernel_unmapped_at_el0() into a single function: the variable
simply stores the result and the decision code lives elsewhere. We
could just have the users check the variable directly, but having a
function makes it clear that these uses are read-only.

The result is that we simplify the code a bit and reduce the amount
of code executed at runtime.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/mmu.h          | 78 ++-------------------------
 arch/arm64/include/asm/pgtable-prot.h |  4 +-
 arch/arm64/kernel/cpufeature.c        | 41 ++++++++++++--
 arch/arm64/kernel/setup.c             |  7 +++
 4 files changed, 51 insertions(+), 79 deletions(-)
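
The net effect of the patch can be condensed as follows; this is a
sketch assembled from the hunks below, not a compilable excerpt:

/* cpufeature.c: single source of truth, set once during early boot. */
bool arm64_use_ng_mappings = false;

/*
 * mmu.h: read-only accessor replacing both arm64_kernel_use_ng_mappings()
 * and the old capability-based arm64_kernel_unmapped_at_el0().
 */
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return arm64_use_ng_mappings;
}

/*
 * setup.c: decide before any memory is allocated so the common cases
 * never need to rewrite global mappings to non-global later.
 */
void __init setup_arch(char **cmdline_p)
{
	/* ... */
	arm64_use_ng_mappings = kaslr_requires_kpti();
	/* ... */
}

If KPTI does get enabled later, kpti_install_ng_mappings() sets the
variable after remapping, so subsequent reads remain a plain load.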

Comments

Suzuki K Poulose Nov. 7, 2019, 11:11 a.m. UTC | #1
Hi Mark,

On 06/11/2019 13:00, Mark Brown wrote:
> Refactor the code which checks to see if we need to use non-global
> mappings to use a variable instead of checking with the CPU capabilities
> each time, doing the initial check for KPTI early in boot before we
> start allocating memory so we still avoid transitioning to non-global
> mappings in common cases.
> 
> Since this variable always matches our decision about non-global
> mappings this means we can also combine arm64_kernel_use_ng_mappings()
> and arm64_unmap_kernel_at_el0() into a single function, the variable
> simply stores the result and the decision code is elsewhere. We could
> just have the users check the variable directly but having a function
> makes it clear that these uses are read-only.
> 
> The result is that we simplify the code a bit and reduces the amount of
> code executed at runtime.
> 
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
>   arch/arm64/include/asm/mmu.h          | 78 ++-------------------------
>   arch/arm64/include/asm/pgtable-prot.h |  4 +-
>   arch/arm64/kernel/cpufeature.c        | 41 ++++++++++++--
>   arch/arm64/kernel/setup.c             |  7 +++
>   4 files changed, 51 insertions(+), 79 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
> index d61908bf4c9c..e4d862420bb4 100644
> --- a/arch/arm64/include/asm/mmu.h
> +++ b/arch/arm64/include/asm/mmu.h
> @@ -29,82 +29,11 @@ typedef struct {
>    */
>   #define ASID(mm)	((mm)->context.id.counter & 0xffff)
>   
> -static inline bool arm64_kernel_unmapped_at_el0(void)
> -{
> -	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
> -	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
> -}
> +extern bool arm64_use_ng_mappings;
>   
> -static inline bool kaslr_requires_kpti(void)
> -{
> -	bool tx1_bug;
> -	u64 ftr;
> -
> -	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
> -		return false;
> -
> -	/*
> -	 * E0PD does a similar job to KPTI so can be used instead
> -	 * where available.
> -	 */
> -	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
> -		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
> -		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
> -			return false;
> -	}
> -
> -	/*
> -	 * Systems affected by Cavium erratum 24756 are incompatible
> -	 * with KPTI.
> -	 */
> -	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
> -		tx1_bug = false;
> -#ifndef MODULE
> -	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
> -		extern const struct midr_range cavium_erratum_27456_cpus[];
> -
> -		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
> -						cavium_erratum_27456_cpus);
> -#endif
> -	} else {
> -		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
> -	}
> -	if (tx1_bug)
> -		return false;
> -
> -	return kaslr_offset() > 0;
> -}
> -
> -static inline bool arm64_kernel_use_ng_mappings(void)
> +static inline bool arm64_kernel_unmapped_at_el0(void)
>   {
> -	/* What's a kpti? Use global mappings if we don't know. */
> -	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
> -		return false;
> -
> -	/*
> -	 * Note: this function is called before the CPU capabilities have
> -	 * been configured, so our early mappings will be global. If we
> -	 * later determine that kpti is required, then
> -	 * kpti_install_ng_mappings() will make them non-global.
> -	 */
> -	if (arm64_kernel_unmapped_at_el0())
> -		return true;
> -
> -	/*
> -	 * Once we are far enough into boot for capabilities to be
> -	 * ready we will have confirmed if we are using non-global
> -	 * mappings so don't need to consider anything else here.
> -	 */
> -	if (static_branch_likely(&arm64_const_caps_ready))
> -		return false;
> -
> -	/*
> -	 * KASLR is enabled so we're going to be enabling kpti on non-broken
> -	 * CPUs regardless of their susceptibility to Meltdown. Rather
> -	 * than force everybody to go through the G -> nG dance later on,
> -	 * just put down non-global mappings from the beginning
> -	 */
> -	return kaslr_requires_kpti();
> +	return arm64_use_ng_mappings;
>   }
>   
>   typedef void (*bp_hardening_cb_t)(void);
> @@ -158,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>   			       pgprot_t prot, bool page_mappings_only);
>   extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
>   extern void mark_linear_text_alias_ro(void);
> +extern bool kaslr_requires_kpti(void);
>   
>   #define INIT_MM_CONTEXT(name)	\
>   	.pgd = init_pg_dir,
> diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
> index 9a21b84536f2..eb1c6f83343d 100644
> --- a/arch/arm64/include/asm/pgtable-prot.h
> +++ b/arch/arm64/include/asm/pgtable-prot.h
> @@ -26,8 +26,8 @@
>   #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
>   #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
>   
> -#define PTE_MAYBE_NG		(arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
> -#define PMD_MAYBE_NG		(arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
> +#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
> +#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
>   
>   #define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
>   #define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 0d551af06421..7ee7cd8b32a0 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -47,6 +47,9 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
>   /* Need also bit for ARM64_CB_PATCH */
>   DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
>   
> +bool arm64_use_ng_mappings = false;
> +EXPORT_SYMBOL(arm64_use_ng_mappings);
> +
>   /*
>    * Flag to indicate if we have computed the system wide
>    * capabilities based on the boot time active CPUs. This
> @@ -961,6 +964,39 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
>   	return has_cpuid_feature(entry, scope);
>   }
>   
> +bool kaslr_requires_kpti(void)
> +{
> +	bool tx1_bug;
> +	u64 ftr;
> +
> +	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
> +		return false;
> +
> +	/*
> +	 * E0PD does a similar job to KPTI so can be used instead
> +	 * where available.
> +	 */
> +	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
> +		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
> +		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
> +			return false;
> +	}
> +
> +	/*
> +	 * Systems affected by Cavium erratum 24756 are incompatible
> +	 * with KPTI.
> +	 */
> +	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
> +		tx1_bug = false;
> +	} else {
> +		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);

This is problematic. When this is called from setup_arch(), we haven't run
the capability check on the boot CPU yet (that happens via
smp_prepare_boot_cpu() -> cpuinfo_store_boot_cpu() -> init_cpu_features()).

So we could get "false" here, since the capability state has not been
updated yet, and go ahead with nG mappings on TX1 with KASLR, which is not
good. So you need to retain the midr_list check for when the caps are not
ready.
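
For illustration, one way to retain that fallback would be to mirror the
check this patch removes from mmu.h (a sketch of the suggestion above, not
the actual follow-up change):

	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
		tx1_bug = false;
	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
		extern const struct midr_range cavium_erratum_27456_cpus[];

		/*
		 * Capabilities are not set up yet (e.g. when called from
		 * setup_arch()), so consult the MIDR list directly.
		 */
		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
						cavium_erratum_27456_cpus);
	} else {
		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
	}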

> +	}
> +	if (tx1_bug)
> +		return false;
> +
> +	return kaslr_offset() > 0;
> +}
> +
>   static bool __meltdown_safe = true;
>   static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
>   
> @@ -1038,7 +1074,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>   	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
>   	kpti_remap_fn *remap_fn;
>   
> -	static bool kpti_applied = false;
>   	int cpu = smp_processor_id();
>   
>   	/*
> @@ -1046,7 +1081,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>   	 * it already or we have KASLR enabled and therefore have not
>   	 * created any global mappings at all.
>   	 */
> -	if (kpti_applied || kaslr_offset() > 0)
> +	if (arm64_use_ng_mappings)
>   		return;
>   
>   	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
> @@ -1056,7 +1091,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
>   	cpu_uninstall_idmap();
>   
>   	if (!cpu)
> -		kpti_applied = true;
> +		arm64_use_ng_mappings = true;
>   
>   	return;
>   }
> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
> index 56f664561754..f28308c9882c 100644
> --- a/arch/arm64/kernel/setup.c
> +++ b/arch/arm64/kernel/setup.c
> @@ -285,6 +285,13 @@ void __init setup_arch(char **cmdline_p)
>   
>   	*cmdline_p = boot_command_line;
>   
> +        /*
> +         * If know now we are going to need KPTI then use non-global
> +         * mappings from the start, avoiding the cost of rewriting
> +         * everything later.
> +         */
> +        arm64_use_ng_mappings = kaslr_requires_kpti();

nit: ^^^ whitespace instead of tabs.

Suzuki

Patch

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d61908bf4c9c..e4d862420bb4 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -29,82 +29,11 @@  typedef struct {
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
-static inline bool arm64_kernel_unmapped_at_el0(void)
-{
-	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
-	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
-}
+extern bool arm64_use_ng_mappings;
 
-static inline bool kaslr_requires_kpti(void)
-{
-	bool tx1_bug;
-	u64 ftr;
-
-	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return false;
-
-	/*
-	 * E0PD does a similar job to KPTI so can be used instead
-	 * where available.
-	 */
-	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
-		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
-		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
-			return false;
-	}
-
-	/*
-	 * Systems affected by Cavium erratum 24756 are incompatible
-	 * with KPTI.
-	 */
-	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
-		tx1_bug = false;
-#ifndef MODULE
-	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
-		extern const struct midr_range cavium_erratum_27456_cpus[];
-
-		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
-						cavium_erratum_27456_cpus);
-#endif
-	} else {
-		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
-	}
-	if (tx1_bug)
-		return false;
-
-	return kaslr_offset() > 0;
-}
-
-static inline bool arm64_kernel_use_ng_mappings(void)
+static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-	/* What's a kpti? Use global mappings if we don't know. */
-	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
-		return false;
-
-	/*
-	 * Note: this function is called before the CPU capabilities have
-	 * been configured, so our early mappings will be global. If we
-	 * later determine that kpti is required, then
-	 * kpti_install_ng_mappings() will make them non-global.
-	 */
-	if (arm64_kernel_unmapped_at_el0())
-		return true;
-
-	/*
-	 * Once we are far enough into boot for capabilities to be
-	 * ready we will have confirmed if we are using non-global
-	 * mappings so don't need to consider anything else here.
-	 */
-	if (static_branch_likely(&arm64_const_caps_ready))
-		return false;
-
-	/*
-	 * KASLR is enabled so we're going to be enabling kpti on non-broken
-	 * CPUs regardless of their susceptibility to Meltdown. Rather
-	 * than force everybody to go through the G -> nG dance later on,
-	 * just put down non-global mappings from the beginning
-	 */
-	return kaslr_requires_kpti();
+	return arm64_use_ng_mappings;
 }
 
 typedef void (*bp_hardening_cb_t)(void);
@@ -158,6 +87,7 @@  extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
+extern bool kaslr_requires_kpti(void);
 
 #define INIT_MM_CONTEXT(name)	\
 	.pgd = init_pg_dir,
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 9a21b84536f2..eb1c6f83343d 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -26,8 +26,8 @@ 
 #define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG		(arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
-#define PMD_MAYBE_NG		(arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG		(arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT		(_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT	(_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0d551af06421..7ee7cd8b32a0 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,9 @@  static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
 /* Need also bit for ARM64_CB_PATCH */
 DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
@@ -961,6 +964,39 @@  has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	return has_cpuid_feature(entry, scope);
 }
 
+bool kaslr_requires_kpti(void)
+{
+	bool tx1_bug;
+	u64 ftr;
+
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	/*
+	 * E0PD does a similar job to KPTI so can be used instead
+	 * where available.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+		ftr = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+		if ((ftr >> ID_AA64MMFR2_E0PD_SHIFT) & 0xf)
+			return false;
+	}
+
+	/*
+	 * Systems affected by Cavium erratum 24756 are incompatible
+	 * with KPTI.
+	 */
+	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+		tx1_bug = false;
+	} else {
+		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
+	}
+	if (tx1_bug)
+		return false;
+
+	return kaslr_offset() > 0;
+}
+
 static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
@@ -1038,7 +1074,6 @@  kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
 	kpti_remap_fn *remap_fn;
 
-	static bool kpti_applied = false;
 	int cpu = smp_processor_id();
 
 	/*
@@ -1046,7 +1081,7 @@  kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	 * it already or we have KASLR enabled and therefore have not
 	 * created any global mappings at all.
 	 */
-	if (kpti_applied || kaslr_offset() > 0)
+	if (arm64_use_ng_mappings)
 		return;
 
 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1056,7 +1091,7 @@  kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	cpu_uninstall_idmap();
 
 	if (!cpu)
-		kpti_applied = true;
+		arm64_use_ng_mappings = true;
 
 	return;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 56f664561754..f28308c9882c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -285,6 +285,13 @@  void __init setup_arch(char **cmdline_p)
 
 	*cmdline_p = boot_command_line;
 
+        /*
+         * If know now we are going to need KPTI then use non-global
+         * mappings from the start, avoiding the cost of rewriting
+         * everything later.
+         */
+        arm64_use_ng_mappings = kaslr_requires_kpti();
+
 	early_fixmap_init();
 	early_ioremap_init();