[v2] arm64: Force SSBS on context switch

Message ID 20190722135309.43186-1-marc.zyngier@arm.com (mailing list archive)
State New, archived
Series [v2] arm64: Force SSBS on context switch

Commit Message

Marc Zyngier July 22, 2019, 1:53 p.m. UTC
On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.

Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/processor.h | 14 ++++++++++++--
 arch/arm64/kernel/process.c        | 17 ++++++++++++++++-
 2 files changed, 28 insertions(+), 3 deletions(-)
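
For context, the bit being lost lives in the SPSR image saved in the task's pt_regs. The lines below are an illustrative sketch only, not part of the patch; the bit positions are assumed to match the PSR_SSBS_BIT / PSR_AA32_SSBS_BIT definitions in the arm64 ptrace headers:

/* Illustrative sketch -- assumed bit positions for the SSBS flag. */
#define PSR_SSBS_BIT		0x00001000	/* AArch64: PSTATE[12] */
#define PSR_AA32_SSBS_BIT	0x00800000	/* AArch32: CPSR[23]   */

/*
 * On a CPU without the SSBS extension, PSTATE[12] is RES0, so the SPSR
 * saved into pt_regs on kernel entry always has the bit clear.  If the
 * task then migrates to an SSBS-capable CPU, that stale cleared bit is
 * what gets restored on the return to userspace -- hence the need to
 * reinstate it at context-switch time, as the patch below does.
 */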

Comments

Neeraj Upadhyay July 22, 2019, 2:58 p.m. UTC | #1
Hi Marc,

On 7/22/19 7:23 PM, Marc Zyngier wrote:
> On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
> where only some of the CPUs implement SSBS, we end up losing track of
> the SSBS bit across task migration.
>
> To address this issue, let's force the SSBS bit on context switch.
>
> Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>   arch/arm64/include/asm/processor.h | 14 ++++++++++++--
>   arch/arm64/kernel/process.c        | 17 ++++++++++++++++-
>   2 files changed, 28 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index fd5b1a4efc70..844e2964b0f5 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
>   		regs->pmr_save = GIC_PRIO_IRQON;
>   }
>   
> +static inline void set_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_SSBS_BIT;
> +}
> +
> +static inline void set_compat_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_AA32_SSBS_BIT;
> +}
> +
>   static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>   				unsigned long sp)
>   {
> @@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>   	regs->pstate = PSR_MODE_EL0t;
>   
>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_SSBS_BIT;
> +		set_ssbs_bit(regs);
>   
>   	regs->sp = sp;
>   }
> @@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
>   #endif
>   
>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_AA32_SSBS_BIT;
> +		set_compat_ssbs_bit(regs);
>   
>   	regs->compat_sp = sp;
>   }
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 9856395ccdb7..036aa301d97d 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
>   			childregs->pstate |= PSR_UAO_BIT;
>   
>   		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
> -			childregs->pstate |= PSR_SSBS_BIT;
> +			set_ssbs_bit(childregs);
>   
>   		if (system_uses_irq_prio_masking())
>   			childregs->pmr_save = GIC_PRIO_IRQON;
> @@ -442,6 +442,20 @@ void uao_thread_switch(struct task_struct *next)
>   	}
>   }
>   
> +static void ssbs_thread_switch(struct task_struct *next)
> +{
> +	if (likely(!(next->flags & PF_KTHREAD)) &&
> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> +		struct pt_regs *regs = task_pt_regs(next);
> +
> +		if (compat_user_mode(regs))
> +			set_compat_ssbs_bit(regs);
> +		else if (user_mode(regs))
> +			set_ssbs_bit(regs);
> +	}
> +}
> +
>   /*
>    * We store our current task in sp_el0, which is clobbered by userspace. Keep a
>    * shadow copy so that we can restore this upon entry from userspace.
> @@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
>   	entry_task_switch(next);
>   	uao_thread_switch(next);
>   	ptrauth_thread_switch(next);
> +	ssbs_thread_switch(next);
>   
>   	/*
>   	 * Complete any pending TLB or cache maintenance on this CPU in case

Looks good to me.


Thanks

Neeraj
Will Deacon July 22, 2019, 4:04 p.m. UTC | #2
On Mon, Jul 22, 2019 at 08:28:15PM +0530, Neeraj Upadhyay wrote:
> On 7/22/19 7:23 PM, Marc Zyngier wrote:
> > +static void ssbs_thread_switch(struct task_struct *next)
> > +{
> > +	if (likely(!(next->flags & PF_KTHREAD)) &&
> > +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> > +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> > +		struct pt_regs *regs = task_pt_regs(next);
> > +
> > +		if (compat_user_mode(regs))
> > +			set_compat_ssbs_bit(regs);
> > +		else if (user_mode(regs))
> > +			set_ssbs_bit(regs);
> > +	}
> > +}
> > +
> >   /*
> >    * We store our current task in sp_el0, which is clobbered by userspace. Keep a
> >    * shadow copy so that we can restore this upon entry from userspace.
> > @@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
> >   	entry_task_switch(next);
> >   	uao_thread_switch(next);
> >   	ptrauth_thread_switch(next);
> > +	ssbs_thread_switch(next);
> >   	/*
> >   	 * Complete any pending TLB or cache maintenance on this CPU in case
> 
> Looks good to me.

I inverted the logic in ssbs_thread_switch() so I could add some comments.
Please double check:

https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/commit/?h=for-next/fixes&id=cbdf8a189a66001c36007bf0f5c975d0376c5c3a

Will
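
For reference, the inverted structure described above looks roughly like the sketch below, reconstructed from the description and the original hunk; the linked for-next/fixes commit is the authoritative version:

static void ssbs_thread_switch(struct task_struct *next)
{
	struct pt_regs *regs = task_pt_regs(next);

	/* Kernel threads never return to userspace, so there is nothing to do. */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/* If the mitigation is forced on, or the task opted in, leave SSBS clear. */
	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE ||
	    test_tsk_thread_flag(next, TIF_SSBD))
		return;

	if (compat_user_mode(regs))
		set_compat_ssbs_bit(regs);
	else if (user_mode(regs))
		set_ssbs_bit(regs);
}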
Neeraj Upadhyay July 22, 2019, 4:32 p.m. UTC | #3
Hi,

On 7/22/19 9:34 PM, Will Deacon wrote:
> On Mon, Jul 22, 2019 at 08:28:15PM +0530, Neeraj Upadhyay wrote:
>> On 7/22/19 7:23 PM, Marc Zyngier wrote:
>>> +static void ssbs_thread_switch(struct task_struct *next)
>>> +{
>>> +	if (likely(!(next->flags & PF_KTHREAD)) &&
>>> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
>>> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
>>> +		struct pt_regs *regs = task_pt_regs(next);
>>> +
>>> +		if (compat_user_mode(regs))
>>> +			set_compat_ssbs_bit(regs);
>>> +		else if (user_mode(regs))
>>> +			set_ssbs_bit(regs);
>>> +	}
>>> +}
>>> +
>>>    /*
>>>     * We store our current task in sp_el0, which is clobbered by userspace. Keep a
>>>     * shadow copy so that we can restore this upon entry from userspace.
>>> @@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
>>>    	entry_task_switch(next);
>>>    	uao_thread_switch(next);
>>>    	ptrauth_thread_switch(next);
>>> +	ssbs_thread_switch(next);
>>>    	/*
>>>    	 * Complete any pending TLB or cache maintenance on this CPU in case
>> Looks good to me.
> I inverted the logic in ssbs_thread_switch() so I could add some comments.
> Please double check:
>
> https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/commit/?h=for-next/fixes&id=cbdf8a189a66001c36007bf0f5c975d0376c5c3a
>
> Will

Looks good


Thanks

Neeraj
Marc Zyngier July 22, 2019, 4:41 p.m. UTC | #4
On 22/07/2019 17:04, Will Deacon wrote:
> On Mon, Jul 22, 2019 at 08:28:15PM +0530, Neeraj Upadhyay wrote:
>> On 7/22/19 7:23 PM, Marc Zyngier wrote:
>>> +static void ssbs_thread_switch(struct task_struct *next)
>>> +{
>>> +	if (likely(!(next->flags & PF_KTHREAD)) &&
>>> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
>>> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
>>> +		struct pt_regs *regs = task_pt_regs(next);
>>> +
>>> +		if (compat_user_mode(regs))
>>> +			set_compat_ssbs_bit(regs);
>>> +		else if (user_mode(regs))
>>> +			set_ssbs_bit(regs);
>>> +	}
>>> +}
>>> +
>>>   /*
>>>    * We store our current task in sp_el0, which is clobbered by userspace. Keep a
>>>    * shadow copy so that we can restore this upon entry from userspace.
>>> @@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
>>>   	entry_task_switch(next);
>>>   	uao_thread_switch(next);
>>>   	ptrauth_thread_switch(next);
>>> +	ssbs_thread_switch(next);
>>>   	/*
>>>   	 * Complete any pending TLB or cache maintenance on this CPU in case
>>
>> Looks good to me.
> 
> I inverted the logic in ssbs_thread_switch() so I could add some comments.
> Please double check:
> 
> https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/commit/?h=for-next/fixes&id=cbdf8a189a66001c36007bf0f5c975d0376c5c3a
Looks good to me!

Thanks,

	M.

Patch

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9856395ccdb7..036aa301d97d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		if (system_uses_irq_prio_masking())
 			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -442,6 +442,20 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	if (likely(!(next->flags & PF_KTHREAD)) &&
+	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
+	    !test_tsk_thread_flag(next, TIF_SSBD)) {
+		struct pt_regs *regs = task_pt_regs(next);
+
+		if (compat_user_mode(regs))
+			set_compat_ssbs_bit(regs);
+		else if (user_mode(regs))
+			set_ssbs_bit(regs);
+	}
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ptrauth_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case