
[1/2] arm64: Move handling of erratum 1418040 into C code

Message ID 20200728092112.3865765-2-maz@kernel.org (mailing list archive)
State New, archived
Series arm64: Allow erratum 1418040 for late CPUs

Commit Message

Marc Zyngier July 28, 2020, 9:21 a.m. UTC
Instead of dealing with erratum 1418040 on each entry and exit,
let's move the handling to __switch_to() instead, which has
several advantages:

- It can be applied when it matters (switching between 32 and 64
  bit tasks).
- It is written in C (yay!)
- It can rely on static keys rather than alternatives

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kernel/entry.S   | 21 ---------------------
 arch/arm64/kernel/process.c | 29 +++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 21 deletions(-)
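
In sketch form, the idea is a static-key-backed capability check in plain C,
evaluated once per context switch instead of an alternatives-patched sequence
on every kernel entry/exit (hypothetical helper name; the real function is in
the patch below):

static void erratum_1418040_sketch(struct task_struct *prev,
				   struct task_struct *next)
{
	/* cpus_have_const_cap() is backed by a static key, so on
	 * unaffected systems this whole function reduces to a
	 * not-taken branch. */
	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
		return;

	/* Nothing to do unless the 32/64-bitness of the task changes. */
	if (is_compat_thread(task_thread_info(prev)) ==
	    is_compat_thread(task_thread_info(next)))
		return;

	/* Trap EL0 access to the virtual counter for 32bit tasks and
	 * re-enable it for 64bit tasks by toggling CNTKCTL_EL1
	 * (see the actual patch below for the details). */
}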

Comments

Sai Prakash Ranjan July 28, 2020, 11:04 a.m. UTC | #1
On 2020-07-28 14:51, Marc Zyngier wrote:
> Instead of dealing with erratum 1418040 on each entry and exit,
> let's move the handling to __switch_to() instead, which has
> several advantages:
> 
> - It can be applied when it matters (switching between 32 and 64
>   bit tasks).
> - It is written in C (yay!)
> - It can rely on static keys rather than alternatives
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kernel/entry.S   | 21 ---------------------
>  arch/arm64/kernel/process.c | 29 +++++++++++++++++++++++++++++
>  2 files changed, 29 insertions(+), 21 deletions(-)
> 

Thanks Marc, tested the series on SC7180 and SM8150 SoCs which are 
affected by this erratum.

Tested-by: Sai Prakash Ranjan <saiprakash.ranjan@codeaurora.org>
Stephen Boyd July 28, 2020, 8:04 p.m. UTC | #2
Quoting Marc Zyngier (2020-07-28 02:21:11)
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 35de8ba60e3d..44445d471442 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -169,19 +169,6 @@ alternative_cb_end
>         stp     x28, x29, [sp, #16 * 14]
>  
>         .if     \el == 0
> -       .if     \regsize == 32
> -       /*
> -        * If we're returning from a 32-bit task on a system affected by
> -        * 1418040 then re-enable userspace access to the virtual counter.
> -        */

Can this comment go above the function in C?

> -#ifdef CONFIG_ARM64_ERRATUM_1418040
> -alternative_if ARM64_WORKAROUND_1418040
> -       mrs     x0, cntkctl_el1
> -       orr     x0, x0, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
> -       msr     cntkctl_el1, x0
> -alternative_else_nop_endif
> -#endif
> -       .endif
>         clear_gp_regs
>         mrs     x21, sp_el0
>         ldr_this_cpu    tsk, __entry_task, x20
> @@ -337,14 +324,6 @@ alternative_else_nop_endif
>         tst     x22, #PSR_MODE32_BIT            // native task?
>         b.eq    3f
>  
> -#ifdef CONFIG_ARM64_ERRATUM_1418040
> -alternative_if ARM64_WORKAROUND_1418040
> -       mrs     x0, cntkctl_el1
> -       bic     x0, x0, #2                      // ARCH_TIMER_USR_VCT_ACCESS_EN
> -       msr     cntkctl_el1, x0
> -alternative_else_nop_endif
> -#endif
> -
>  #ifdef CONFIG_ARM64_ERRATUM_845719
>  alternative_if ARM64_WORKAROUND_845719
>  #ifdef CONFIG_PID_IN_CONTEXTIDR
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 6089638c7d43..87c33f7c536b 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -515,6 +515,34 @@ static void entry_task_switch(struct task_struct *next)
>         __this_cpu_write(__entry_task, next);
>  }
>  
> +static void erratum_1418040_thread_switch(struct task_struct *prev,

Should it be marked __always_inline so that the cpus_have_const_cap()
check can avoid the branch to this function when there's nothing to do?

> +                                         struct task_struct *next)
> +{
> +       bool prev32, next32;
> +       u64 val;
> +
> +       if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
> +             cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
> +               return;
> +
> +       prev32 = (!(prev->flags & PF_KTHREAD) &&
> +                 is_compat_thread(task_thread_info(prev)));
> +       next32 = (!(next->flags & PF_KTHREAD) &&
> +                 is_compat_thread(task_thread_info(next)));
> +
> +       if (prev32 == next32)
> +               return;
> +
> +       val = read_sysreg(cntkctl_el1);
> +
> +       if (prev32 & !next32)
> +               val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
> +       else
> +               val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
> +
> +       write_sysreg(val, cntkctl_el1);
> +}
> +
>  /*
>   * Thread switching.
>   */
Marc Zyngier July 29, 2020, 9 a.m. UTC | #3
Hi Stephen,

On 2020-07-28 21:04, Stephen Boyd wrote:
> Quoting Marc Zyngier (2020-07-28 02:21:11)
>> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
>> index 35de8ba60e3d..44445d471442 100644
>> --- a/arch/arm64/kernel/entry.S
>> +++ b/arch/arm64/kernel/entry.S
>> @@ -169,19 +169,6 @@ alternative_cb_end
>>         stp     x28, x29, [sp, #16 * 14]
>> 
>>         .if     \el == 0
>> -       .if     \regsize == 32
>> -       /*
>> -        * If we're returning from a 32-bit task on a system affected by
>> -        * 1418040 then re-enable userspace access to the virtual counter.
>> -        */
> 
> Can this comment go above the function in C?

It will have to be a slightly different comment, as the logic changes.
Something like:

/*
 * ARM erratum 1418040 handling affecting the 32bit view of CNTVCT.
 *
 * Assuming the virtual counter is enabled at the beginning of times:
 * - disable access when switching from a 64bit task to a 32bit task
 * - enable access when switching from a 32bit task to a 64bit task
 */

> 
>> -#ifdef CONFIG_ARM64_ERRATUM_1418040
>> -alternative_if ARM64_WORKAROUND_1418040
>> -       mrs     x0, cntkctl_el1
>> -       orr     x0, x0, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
>> -       msr     cntkctl_el1, x0
>> -alternative_else_nop_endif
>> -#endif
>> -       .endif
>>         clear_gp_regs
>>         mrs     x21, sp_el0
>>         ldr_this_cpu    tsk, __entry_task, x20
>> @@ -337,14 +324,6 @@ alternative_else_nop_endif
>>         tst     x22, #PSR_MODE32_BIT            // native task?
>>         b.eq    3f
>> 
>> -#ifdef CONFIG_ARM64_ERRATUM_1418040
>> -alternative_if ARM64_WORKAROUND_1418040
>> -       mrs     x0, cntkctl_el1
>> -       bic     x0, x0, #2                      // ARCH_TIMER_USR_VCT_ACCESS_EN
>> -       msr     cntkctl_el1, x0
>> -alternative_else_nop_endif
>> -#endif
>> -
>>  #ifdef CONFIG_ARM64_ERRATUM_845719
>>  alternative_if ARM64_WORKAROUND_845719
>>  #ifdef CONFIG_PID_IN_CONTEXTIDR
>> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
>> index 6089638c7d43..87c33f7c536b 100644
>> --- a/arch/arm64/kernel/process.c
>> +++ b/arch/arm64/kernel/process.c
>> @@ -515,6 +515,34 @@ static void entry_task_switch(struct task_struct *next)
>>         __this_cpu_write(__entry_task, next);
>>  }
>> 
>> +static void erratum_1418040_thread_switch(struct task_struct *prev,
> 
> Should it be marked __always_inline so that the cpus_have_const_cap()
> check can avoid the branch to this function when there's nothing to do?

I'd expect the compiler to directly inline this (in my experience it
does, as with most of the other functions called from __switch_to()).

It probably doesn't hurt though, so I'll do that.

Thanks,

         M.
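
For reference, the change agreed above amounts to an annotation on the
helper's definition; a rough sketch, assuming the body stays as in the
patch below:

static __always_inline void erratum_1418040_thread_switch(struct task_struct *prev,
							   struct task_struct *next)
{
	/* body as in the patch below; forcing inlining means the
	 * static-key capability check is evaluated directly in
	 * __switch_to(), with no call on unaffected systems */
}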
Catalin Marinas July 29, 2020, 5:12 p.m. UTC | #4
On Tue, Jul 28, 2020 at 10:21:11AM +0100, Marc Zyngier wrote:
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 6089638c7d43..87c33f7c536b 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -515,6 +515,34 @@ static void entry_task_switch(struct task_struct *next)
>  	__this_cpu_write(__entry_task, next);
>  }
>  
> +static void erratum_1418040_thread_switch(struct task_struct *prev,
> +					  struct task_struct *next)
> +{
> +	bool prev32, next32;
> +	u64 val;
> +
> +	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
> +	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
> +		return;
> +
> +	prev32 = (!(prev->flags & PF_KTHREAD) &&
> +		  is_compat_thread(task_thread_info(prev)));
> +	next32 = (!(next->flags & PF_KTHREAD) &&
> +		  is_compat_thread(task_thread_info(next)));

I don't think we need to test PF_KTHREAD. is_compat_thread() checks for
TIF_32BIT and we never set this on kernel threads (they are cloned from
kthreadd).
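
Concretely, the simplification suggested here would presumably reduce the two
assignments to the following (a sketch, not a respun patch):

	/* Kernel threads never have TIF_32BIT set, so the PF_KTHREAD
	 * test adds nothing. */
	prev32 = is_compat_thread(task_thread_info(prev));
	next32 = is_compat_thread(task_thread_info(next));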

Patch

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 35de8ba60e3d..44445d471442 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -169,19 +169,6 @@  alternative_cb_end
 	stp	x28, x29, [sp, #16 * 14]
 
 	.if	\el == 0
-	.if	\regsize == 32
-	/*
-	 * If we're returning from a 32-bit task on a system affected by
-	 * 1418040 then re-enable userspace access to the virtual counter.
-	 */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
-	mrs	x0, cntkctl_el1
-	orr	x0, x0, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-	.endif
 	clear_gp_regs
 	mrs	x21, sp_el0
 	ldr_this_cpu	tsk, __entry_task, x20
@@ -337,14 +324,6 @@  alternative_else_nop_endif
 	tst	x22, #PSR_MODE32_BIT		// native task?
 	b.eq	3f
 
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
-	mrs	x0, cntkctl_el1
-	bic	x0, x0, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6089638c7d43..87c33f7c536b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -515,6 +515,34 @@  static void entry_task_switch(struct task_struct *next)
 	__this_cpu_write(__entry_task, next);
 }
 
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+					  struct task_struct *next)
+{
+	bool prev32, next32;
+	u64 val;
+
+	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+		return;
+
+	prev32 = (!(prev->flags & PF_KTHREAD) &&
+		  is_compat_thread(task_thread_info(prev)));
+	next32 = (!(next->flags & PF_KTHREAD) &&
+		  is_compat_thread(task_thread_info(next)));
+
+	if (prev32 == next32)
+		return;
+
+	val = read_sysreg(cntkctl_el1);
+
+	if (prev32 & !next32)
+		val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	else
+		val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+	write_sysreg(val, cntkctl_el1);
+}
+
 /*
  * Thread switching.
  */
@@ -530,6 +558,7 @@  __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ssbs_thread_switch(next);
+	erratum_1418040_thread_switch(prev, next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case