
[10/39] x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack

Message ID 1531906876-13451-11-git-send-email-joro@8bytes.org (mailing list archive)
State New, archived

Commit Message

Joerg Roedel July 18, 2018, 9:40 a.m. UTC
From: Joerg Roedel <jroedel@suse.de>

It can happen that we enter the kernel from kernel-mode and
on the entry-stack. The most common way this happens is when
we get an exception while loading the user-space segment
registers on the kernel-to-userspace exit path.

The segment loading needs to be done after the entry-stack
switch, because the stack-switch needs kernel %fs for
per_cpu access.

When this happens, we need to make sure that we leave the
kernel with the entry-stack again, so that the interrupted
code-path runs on the right stack when switching to the
user-cr3.

We detect this condition on kernel-entry by checking
CS.RPL and %esp, and when it happens we copy the complete
contents of the entry stack over to the task-stack. This
is necessary because once we enter the exception handlers
we might be scheduled out or even migrated to a different
CPU, so we can no longer rely on the entry-stack contents.
We also leave a marker in the stack-frame so that this
condition can be detected again on the exit path.
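
In rough C pseudo-code the entry-side handling looks like this
(illustrative sketch only, with made-up function and parameter
names; the real implementation is the assembly added to the
SWITCH_TO_KERNEL_STACK macro below):

	/*
	 * Rough, illustrative model, not kernel code. cs_slot points at
	 * the CS dword inside the saved frame, frame is the current
	 * stack pointer on the entry stack, entry_top/task_top are the
	 * tops (highest addresses) of the entry stack and task stack.
	 */
	#include <stdint.h>
	#include <string.h>

	#define CS_FROM_ENTRY_STACK  (1u << 31)  /* marker bit in the CS dword */
	#define SEGMENT_RPL_MASK     0x3u        /* requested privilege level  */

	char *switch_to_kernel_stack_sketch(uint32_t *cs_slot, char *frame,
					    char *entry_top, char *task_top,
					    size_t ptregs_size)
	{
		size_t bytes;

		/* Clear unused upper bits of the CS dword. */
		*cs_slot &= 0xffff;

		if ((*cs_slot & SEGMENT_RPL_MASK) == 0) {
			/*
			 * Entered from kernel mode on the entry stack:
			 * copy everything up to the top of the entry
			 * stack and mark the frame so the exit path
			 * knows what happened.
			 */
			bytes     = (size_t)(entry_top - frame);
			*cs_slot |= CS_FROM_ENTRY_STACK;
		} else {
			/* Normal entry from user space: copy only pt_regs. */
			bytes = ptregs_size;
		}

		/* Allocate the frame on the task stack and copy everything over. */
		memcpy(task_top - bytes, frame, bytes);

		return task_top - bytes;	/* new stack pointer */
	}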

On the exit path the copy is reversed: we copy all of the
remaining task-stack contents back to the entry-stack and
switch to it.
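
A matching sketch of the exit-side reversal (again illustrative
only, with made-up names; the real code is the
PARANOID_EXIT_TO_KERNEL_MODE macro added below):

	/*
	 * Rough, illustrative model, not kernel code. frame is the
	 * current stack pointer on the task stack, task_top and
	 * entry_top are the tops of the task stack and entry stack.
	 */
	#include <stdint.h>
	#include <string.h>

	#define CS_FROM_ENTRY_STACK  (1u << 31)  /* same marker bit as on entry */

	char *exit_to_kernel_mode_sketch(uint32_t *cs_slot, char *frame,
					 char *task_top, char *entry_top)
	{
		size_t bytes;
		char *new_sp;

		/* Common case: we did not enter via the entry stack. */
		if (!(*cs_slot & CS_FROM_ENTRY_STACK))
			return frame;

		/* Clear the marker from the stack frame. */
		*cs_slot &= ~CS_FROM_ENTRY_STACK;

		/*
		 * Copy whatever is still on the task stack back to the
		 * entry stack. The switch to the new stack pointer must
		 * happen only after the copy is done, otherwise an NMI
		 * arriving in between could destroy the contents we are
		 * about to copy.
		 */
		bytes  = (size_t)(task_top - frame);
		new_sp = entry_top - bytes;
		memcpy(new_sp, frame, bytes);

		return new_sp;		/* continue on the entry stack */
	}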

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/entry/entry_32.S | 116 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 115 insertions(+), 1 deletion(-)

Comments

Jan Kiszka Oct. 12, 2018, 6:29 p.m. UTC | #1
On 18.07.18 11:40, Joerg Roedel wrote:
> From: Joerg Roedel <jroedel@suse.de>
> 
> It can happen that we enter the kernel from kernel-mode and
> on the entry-stack. The most common way this happens is when
> we get an exception while loading the user-space segment
> registers on the kernel-to-userspace exit path.
> 
> The segment loading needs to be done after the entry-stack
> switch, because the stack-switch needs kernel %fs for
> per_cpu access.
> 
> When this happens, we need to make sure that we leave the
> kernel with the entry-stack again, so that the interrupted
> code-path runs on the right stack when switching to the
> user-cr3.
> 
> We do this by detecting this condition on kernel-entry by
> checking CS.RPL and %esp, and if it happens, we copy over
> the complete content of the entry stack to the task-stack.
> This needs to be done because once we enter the exception
> handlers we might be scheduled out or even migrated to a
> different CPU, so that we can't rely on the entry-stack
> contents. We also leave a marker in the stack-frame to
> detect this condition on the exit path.
> 
> On the exit path the copy is reversed, we copy all of the
> remaining task-stack back to the entry-stack and switch
> to it.
> 
> Signed-off-by: Joerg Roedel <jroedel@suse.de>
> ---
>   arch/x86/entry/entry_32.S | 116 +++++++++++++++++++++++++++++++++++++++++++++-
>   1 file changed, 115 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
> index 7635925..9d6eceb 100644
> --- a/arch/x86/entry/entry_32.S
> +++ b/arch/x86/entry/entry_32.S
> @@ -294,6 +294,9 @@
>    * copied there. So allocate the stack-frame on the task-stack and
>    * switch to it before we do any copying.
>    */
> +
> +#define CS_FROM_ENTRY_STACK	(1 << 31)
> +
>   .macro SWITCH_TO_KERNEL_STACK
>   
>   	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
> @@ -316,6 +319,16 @@
>   	/* Load top of task-stack into %edi */
>   	movl	TSS_entry2task_stack(%edi), %edi
>   
> +	/*
> +	 * Clear unused upper bits of the dword containing the word-sized CS
> +	 * slot in pt_regs in case hardware didn't clear it for us.
> +	 */
> +	andl	$(0x0000ffff), PT_CS(%esp)
> +
> +	/* Special case - entry from kernel mode via entry stack */
> +	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
> +	jz	.Lentry_from_kernel_\@
> +
>   	/* Bytes to copy */
>   	movl	$PTREGS_SIZE, %ecx
>   
> @@ -329,8 +342,8 @@
>   	 */
>   	addl	$(4 * 4), %ecx
>   
> -.Lcopy_pt_regs_\@:
>   #endif
> +.Lcopy_pt_regs_\@:
>   
>   	/* Allocate frame on task-stack */
>   	subl	%ecx, %edi
> @@ -346,6 +359,56 @@
>   	cld
>   	rep movsl
>   
> +	jmp .Lend_\@
> +
> +.Lentry_from_kernel_\@:
> +
> +	/*
> +	 * This handles the case when we enter the kernel from
> +	 * kernel-mode and %esp points to the entry-stack. When this
> +	 * happens we need to switch to the task-stack to run C code,
> +	 * but switch back to the entry-stack again when we approach
> +	 * iret and return to the interrupted code-path. This usually
> +	 * happens when we hit an exception while restoring user-space
> +	 * segment registers on the way back to user-space.
> +	 *
> +	 * When we switch to the task-stack here, we can't trust the
> +	 * contents of the entry-stack anymore, as the exception handler
> +	 * might be scheduled out or moved to another CPU. Therefore we
> +	 * copy the complete entry-stack to the task-stack and set a
> +	 * marker in the iret-frame (bit 31 of the CS dword) to detect
> +	 * what we've done on the iret path.
> +	 *
> +	 * On the iret path we copy everything back and switch to the
> +	 * entry-stack, so that the interrupted kernel code-path
> +	 * continues on the same stack it was interrupted with.
> +	 *
> +	 * Be aware that an NMI can happen anytime in this code.
> +	 *
> +	 * %esi: Entry-Stack pointer (same as %esp)
> +	 * %edi: Top of the task stack
> +	 */
> +
> +	/* Calculate number of bytes on the entry stack in %ecx */
> +	movl	%esi, %ecx
> +
> +	/* %ecx to the top of entry-stack */
> +	andl	$(MASK_entry_stack), %ecx
> +	addl	$(SIZEOF_entry_stack), %ecx
> +
> +	/* Number of bytes on the entry stack to %ecx */
> +	sub	%esi, %ecx
> +
> +	/* Mark stackframe as coming from entry stack */
> +	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
> +
> +	/*
> +	 * %esi and %edi are unchanged, %ecx contains the number of
> +	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
> +	 * the stack-frame on task-stack and copy everything over
> +	 */
> +	jmp .Lcopy_pt_regs_\@
> +
>   .Lend_\@:
>   .endm
>   
> @@ -404,6 +467,56 @@
>   .endm
>   
>   /*
> + * This macro handles the case when we return to kernel-mode on the iret
> + * path and have to switch back to the entry stack.
> + *
> + * See the comments below the .Lentry_from_kernel_\@ label in the
> + * SWITCH_TO_KERNEL_STACK macro for more details.
> + */
> +.macro PARANOID_EXIT_TO_KERNEL_MODE
> +
> +	/*
> +	 * Test if we entered the kernel with the entry-stack. Most
> +	 * likely we did not, because this code only runs on the
> +	 * return-to-kernel path.
> +	 */
> +	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
> +	jz	.Lend_\@
> +
> +	/* Unlikely slow-path */
> +
> +	/* Clear marker from stack-frame */
> +	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
> +
> +	/* Copy the remaining task-stack contents to entry-stack */
> +	movl	%esp, %esi
> +	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
> +
> +	/* Bytes on the task-stack to ecx */
> +	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
> +	subl	%esi, %ecx
> +
> +	/* Allocate stack-frame on entry-stack */
> +	subl	%ecx, %edi
> +
> +	/*
> +	 * Save future stack-pointer, we must not switch until the
> +	 * copy is done, otherwise the NMI handler could destroy the
> +	 * contents of the task-stack we are about to copy.
> +	 */
> +	movl	%edi, %ebx
> +
> +	/* Do the copy */
> +	shrl	$2, %ecx
> +	cld
> +	rep movsl
> +
> +	/* Safe to switch to entry-stack now */
> +	movl	%ebx, %esp
> +
> +.Lend_\@:
> +.endm
> +/*
>    * %eax: prev task
>    * %edx: next task
>    */
> @@ -764,6 +877,7 @@ restore_all:
>   
>   restore_all_kernel:
>   	TRACE_IRQS_IRET
> +	PARANOID_EXIT_TO_KERNEL_MODE
>   	RESTORE_REGS 4
>   	jmp	.Lirq_return
>   
> 

I've bisected a boot breakage on an Intel Quark board (config attached) down
to this commit (b92a165df17e; I additionally had to apply d1b47a7c9efc). The
kernel prints nothing if this commit is in.

The board is a Siemens IOT2000; I will check whether this can also be
triggered on a similar Galileo Gen2. QEMU does not reproduce it,
unfortunately.

The commit looks unsuspicious at first glance - maybe it is just changing
some layout in an unfortunate way. Any ideas?

Thanks,
Jan

Patch

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 7635925..9d6eceb 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,9 @@ 
  * copied there. So allocate the stack-frame on the task-stack and
  * switch to it before we do any copying.
  */
+
+#define CS_FROM_ENTRY_STACK	(1 << 31)
+
 .macro SWITCH_TO_KERNEL_STACK
 
 	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
@@ -316,6 +319,16 @@ 
 	/* Load top of task-stack into %edi */
 	movl	TSS_entry2task_stack(%edi), %edi
 
+	/*
+	 * Clear unused upper bits of the dword containing the word-sized CS
+	 * slot in pt_regs in case hardware didn't clear it for us.
+	 */
+	andl	$(0x0000ffff), PT_CS(%esp)
+
+	/* Special case - entry from kernel mode via entry stack */
+	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
+	jz	.Lentry_from_kernel_\@
+
 	/* Bytes to copy */
 	movl	$PTREGS_SIZE, %ecx
 
@@ -329,8 +342,8 @@ 
 	 */
 	addl	$(4 * 4), %ecx
 
-.Lcopy_pt_regs_\@:
 #endif
+.Lcopy_pt_regs_\@:
 
 	/* Allocate frame on task-stack */
 	subl	%ecx, %edi
@@ -346,6 +359,56 @@ 
 	cld
 	rep movsl
 
+	jmp .Lend_\@
+
+.Lentry_from_kernel_\@:
+
+	/*
+	 * This handles the case when we enter the kernel from
+	 * kernel-mode and %esp points to the entry-stack. When this
+	 * happens we need to switch to the task-stack to run C code,
+	 * but switch back to the entry-stack again when we approach
+	 * iret and return to the interrupted code-path. This usually
+	 * happens when we hit an exception while restoring user-space
+	 * segment registers on the way back to user-space.
+	 *
+	 * When we switch to the task-stack here, we can't trust the
+	 * contents of the entry-stack anymore, as the exception handler
+	 * might be scheduled out or moved to another CPU. Therefore we
+	 * copy the complete entry-stack to the task-stack and set a
+	 * marker in the iret-frame (bit 31 of the CS dword) to detect
+	 * what we've done on the iret path.
+	 *
+	 * On the iret path we copy everything back and switch to the
+	 * entry-stack, so that the interrupted kernel code-path
+	 * continues on the same stack it was interrupted with.
+	 *
+	 * Be aware that an NMI can happen anytime in this code.
+	 *
+	 * %esi: Entry-Stack pointer (same as %esp)
+	 * %edi: Top of the task stack
+	 */
+
+	/* Calculate number of bytes on the entry stack in %ecx */
+	movl	%esi, %ecx
+
+	/* %ecx to the top of entry-stack */
+	andl	$(MASK_entry_stack), %ecx
+	addl	$(SIZEOF_entry_stack), %ecx
+
+	/* Number of bytes on the entry stack to %ecx */
+	sub	%esi, %ecx
+
+	/* Mark stackframe as coming from entry stack */
+	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+	/*
+	 * %esi and %edi are unchanged, %ecx contains the number of
+	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+	 * the stack-frame on task-stack and copy everything over
+	 */
+	jmp .Lcopy_pt_regs_\@
+
 .Lend_\@:
 .endm
 
@@ -404,6 +467,56 @@ 
 .endm
 
 /*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack.
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+	/*
+	 * Test if we entered the kernel with the entry-stack. Most
+	 * likely we did not, because this code only runs on the
+	 * return-to-kernel path.
+	 */
+	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
+	jz	.Lend_\@
+
+	/* Unlikely slow-path */
+
+	/* Clear marker from stack-frame */
+	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+	/* Copy the remaining task-stack contents to entry-stack */
+	movl	%esp, %esi
+	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+	/* Bytes on the task-stack to ecx */
+	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+	subl	%esi, %ecx
+
+	/* Allocate stack-frame on entry-stack */
+	subl	%ecx, %edi
+
+	/*
+	 * Save future stack-pointer, we must not switch until the
+	 * copy is done, otherwise the NMI handler could destroy the
+	 * contents of the task-stack we are about to copy.
+	 */
+	movl	%edi, %ebx
+
+	/* Do the copy */
+	shrl	$2, %ecx
+	cld
+	rep movsl
+
+	/* Safe to switch to entry-stack now */
+	movl	%ebx, %esp
+
+.Lend_\@:
+.endm
+/*
  * %eax: prev task
  * %edx: next task
  */
@@ -764,6 +877,7 @@  restore_all:
 
 restore_all_kernel:
 	TRACE_IRQS_IRET
+	PARANOID_EXIT_TO_KERNEL_MODE
 	RESTORE_REGS 4
 	jmp	.Lirq_return