[v3] arm64: context: Fix ASID limit in boot messages

Message ID 20200227083446.677377-1-jean-philippe@linaro.org (mailing list archive)
State Mainlined
Commit 9abd515a6e4a5c58c6eb4d04110430325eb5f5ac
Series [v3] arm64: context: Fix ASID limit in boot messages

Commit Message

Jean-Philippe Brucker Feb. 27, 2020, 8:34 a.m. UTC
Since commit f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI
is not in use"), the NUM_USER_ASIDS macro doesn't correspond to the
effective number of ASIDs when KPTI is enabled. Get an accurate number
of available ASIDs in an arch_initcall, once we've discovered all CPUs'
capabilities and know if we still need to halve the ASID space for KPTI.

Fixes: f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use")
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
v2->v3: move pr_info() as well, make it more readable.
v1->v2: move warning to arch_initcall(), post capabilities (e.g. E0PD)
        discovery.

This change may be a little invasive for just a validation warning, but
it will likely be needed later, in the asid-pinning patch I'd like to
introduce for IOMMU SVA.
---
 arch/arm64/mm/context.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
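
For context, NUM_USER_ASIDS is derived from the ASID width probed at boot. A simplified sketch of the relevant definitions in arch/arm64/mm/context.c (names as in mainline at the time; check the tree for the exact form):

	#define ASID_FIRST_VERSION	(1UL << asid_bits)
	#define NUM_USER_ASIDS		ASID_FIRST_VERSION

With 16-bit ASIDs this is 65536 entries, but KPTI allocates ASIDs in user/kernel pairs, so only half of them (32768) are actually available to user tasks. That halved figure is what asids_update_limit() now checks and prints.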

Comments

Vladimir Murzin Feb. 27, 2020, 9:50 a.m. UTC | #1
On 2/27/20 8:34 AM, Jean-Philippe Brucker wrote:
> Since commit f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI
> is not in use"), the NUM_USER_ASIDS macro doesn't correspond to the
> effective number of ASIDs when KPTI is enabled. Get an accurate number
> of available ASIDs in an arch_initcall, once we've discovered all CPUs'
> capabilities and know if we still need to halve the ASID space for KPTI.
> 
> Fixes: f88f42f853a8 ("arm64: context: Free up kernel ASIDs if KPTI is not in use")
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> [...]

FWIW

Reviewed-by: Vladimir Murzin <vladimir.murzin@arm.com>

Thanks!
Vladimir
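
As background on the initcall choice: early initcalls run before secondary CPUs are brought online, so system-wide capabilities such as KPTI (and features like E0PD that can disable it) are not yet finalised when asids_init() runs. A rough sketch of the generic boot ordering in init/main.c (simplified for illustration):

	do_pre_smp_initcalls();	/* early_initcall: asids_init() runs here */
	smp_init();		/* secondary CPUs online, capabilities finalised */
	do_basic_setup();	/* arch_initcall: asids_update_limit() runs here,
				   so arm64_kernel_unmapped_at_el0() gives the
				   final system-wide answer */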

Patch

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 8ef73e89d514..d89bb22589f6 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -260,14 +260,26 @@ asmlinkage void post_ttbr_update_workaround(void)
 			CONFIG_CAVIUM_ERRATUM_27456));
 }
 
-static int asids_init(void)
+static int asids_update_limit(void)
 {
-	asid_bits = get_cpu_asid_bits();
+	unsigned long num_available_asids = NUM_USER_ASIDS;
+
+	if (arm64_kernel_unmapped_at_el0())
+		num_available_asids /= 2;
 	/*
 	 * Expect allocation after rollover to fail if we don't have at least
 	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
 	 */
-	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+	pr_info("ASID allocator initialised with %lu entries\n",
+		num_available_asids);
+	return 0;
+}
+arch_initcall(asids_update_limit);
+
+static int asids_init(void)
+{
+	asid_bits = get_cpu_asid_bits();
 	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
 	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
 			   GFP_KERNEL);
@@ -282,8 +294,6 @@ static int asids_init(void)
 	 */
 	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
 		set_kpti_asid_bits();
-
-	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }
 early_initcall(asids_init);
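
For illustration, on a system with 16-bit ASIDs and KPTI enabled, the pr_info() moved above would report the halved figure at boot, e.g. (values assumed):

	ASID allocator initialised with 32768 entries

whereas the old message printed the raw NUM_USER_ASIDS value (65536), overstating the usable space by a factor of two.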