KVM: PPC: Book3S HV: Fix duplication of host SLB entries

Message ID 20180321225046.GA18060@fergus.ozlabs.ibm.com (mailing list archive)
State New, archived

Commit Message

Paul Mackerras March 21, 2018, 10:50 p.m. UTC
Since commit 6964e6a4e489 ("KVM: PPC: Book3S HV: Do SLB load/unload
with guest LPCR value loaded", 2018-01-11), we have been seeing
occasional machine check interrupts on POWER8 systems when running
KVM guests, due to SLB multihit errors.

This turns out to be due to the guest exit code reloading the host
SLB entries from the SLB shadow buffer when the SLB was not previously
cleared in the guest entry path.  This can happen because the path
which skips from the guest entry code to the guest exit code without
entering the guest now does the skip before the SLB is cleared and
loaded with guest values, but the host values are loaded after the
point in the guest exit path that we skip to.

To fix this, we move the code that reloads the host SLB values up
so that it occurs just before the point in the guest exit code (the
label guest_bypass:) where we skip to from the guest entry path.

Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Fixes: 6964e6a4e489 ("KVM: PPC: Book3S HV: Do SLB load/unload with guest LPCR value loaded")
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 35 +++++++++++++++++----------------
 1 file changed, 18 insertions(+), 17 deletions(-)
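
A minimal, self-contained C model of the two layouts makes the ordering problem concrete. Nothing below is kernel code: clear_slb(), load_guest_slb() and load_host_slb() are hypothetical stand-ins for the slbia/slbmte sequences in book3s_hv_rmhandlers.S, slot numbering is elided, and an ESID appearing twice stands in for an SLB multihit.

#include <stdbool.h>
#include <stdio.h>

enum { MAX_SLB = 8 };

static unsigned long slb[MAX_SLB];	/* installed ESIDs; slot numbering elided */
static int slb_n;

static void slb_add(unsigned long esid) { slb[slb_n++] = esid; }
static void clear_slb(void) { slb_n = 0; }
static void load_guest_slb(void) { slb_add(0x200); }	/* a guest segment */
static void load_host_slb(void) { slb_add(0x100); }	/* a bolted host segment */

/* An ESID present more than once is this model's stand-in for SLB multihit. */
static bool multihit(void)
{
	for (int i = 0; i < slb_n; i++)
		for (int j = i + 1; j < slb_n; j++)
			if (slb[i] == slb[j])
				return true;
	return false;
}

/*
 * Old layout: the host reload sits after the point the early-exit path
 * skips to, so it runs even when the entry path never cleared the SLB.
 */
static void old_entry_exit(bool bypass)
{
	if (!bypass) {
		clear_slb();		/* guest entry: clear, then load guest values */
		load_guest_slb();
	}
	/* guest_bypass: the skip from the entry path lands here */
	load_host_slb();
}

/*
 * New layout: the host reload is moved above guest_bypass, onto the path
 * that really did clear the SLB; the bypass path no longer reaches it.
 */
static void new_entry_exit(bool bypass)
{
	if (!bypass) {
		clear_slb();
		load_guest_slb();
		load_host_slb();
	}
	/* guest_bypass: the skip from the entry path lands here */
}

int main(void)
{
	load_host_slb();	/* host SLB already populated before entry */
	old_entry_exit(true);	/* take the bypass path */
	printf("old layout, bypass: multihit=%d\n", multihit());	/* prints 1 */

	clear_slb();
	load_host_slb();
	new_entry_exit(true);
	printf("new layout, bypass: multihit=%d\n", multihit());	/* prints 0 */
	return 0;
}

Only the old layout, with the bypass path taken, re-installs a host segment that was never removed; that duplication is what the machine checks were reporting.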

Comments

Alexey Kardashevskiy March 23, 2018, 2:32 a.m. UTC | #1
On 22/3/18 9:50 am, Paul Mackerras wrote:
> Since commit 6964e6a4e489 ("KVM: PPC: Book3S HV: Do SLB load/unload
> with guest LPCR value loaded", 2018-01-11), we have been seeing
> occasional machine check interrupts on POWER8 systems when running
> KVM guests, due to SLB multihit errors.
> 
> This turns out to be due to the guest exit code reloading the host
> SLB entries from the SLB shadow buffer when the SLB was not previously
> cleared in the guest entry path.  This can happen because the path
> which skips from the guest entry code to the guest exit code without
> entering the guest now does the skip before the SLB is cleared and
> loaded with guest values, but the host values are loaded after the
> point in the guest exit path that we skip to.
> 
> To fix this, we move the code that reloads the host SLB values up
> so that it occurs just before the point in the guest exit code (the
> label guest_bypass:) where we skip to from the guest entry path.
> 
> Reported-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> Fixes: 6964e6a4e489 ("KVM: PPC: Book3S HV: Do SLB load/unload with guest LPCR value loaded")
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>


Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru>

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index d332646..f86a202 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1557,6 +1557,24 @@ mc_cont:
 	ptesync
 3:	stw	r5,VCPU_SLB_MAX(r9)
 
+	/* load host SLB entries */
+BEGIN_MMU_FTR_SECTION
+	b	0f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
+	ld	r8,PACA_SLBSHADOWPTR(r13)
+
+	.rept	SLB_NUM_BOLTED
+	li	r3, SLBSHADOW_SAVEAREA
+	LDX_BE	r5, r8, r3
+	addi	r3, r3, 8
+	LDX_BE	r6, r8, r3
+	andis.	r7,r5,SLB_ESID_V@h
+	beq	1f
+	slbmte	r6,r5
+1:	addi	r8,r8,16
+	.endr
+0:
+
 guest_bypass:
 	stw	r12, STACK_SLOT_TRAP(r1)
 	mr 	r3, r12
@@ -2018,23 +2036,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPCR,r8
 	isync
 48:
-	/* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
-	b	0f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-	ld	r8,PACA_SLBSHADOWPTR(r13)
-
-	.rept	SLB_NUM_BOLTED
-	li	r3, SLBSHADOW_SAVEAREA
-	LDX_BE	r5, r8, r3
-	addi	r3, r3, 8
-	LDX_BE	r6, r8, r3
-	andis.	r7,r5,SLB_ESID_V@h
-	beq	1f
-	slbmte	r6,r5
-1:	addi	r8,r8,16
-	.endr
-0:
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
 	/* Finish timing, if we have a vcpu */
 	ld	r4, HSTATE_KVM_VCPU(r13)
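
For reference, the ".rept SLB_NUM_BOLTED" loop that the patch moves can be read as the following C walk over the shadow buffer. This is an illustration, not the kernel's code: struct slb_save_area, load_be64() and the slbmte() stub are simplified stand-ins, and the SLB_NUM_BOLTED value is arbitrary. The shape is what matters: for each bolted save area in the shadow buffer that PACA_SLBSHADOWPTR points to, load the big-endian esid/vsid pair and install it only when the ESID valid bit is set.

#include <stdint.h>
#include <stdio.h>

#define SLB_NUM_BOLTED	2		/* illustrative count only */
#define SLB_ESID_V	(1ULL << 27)	/* valid bit, the one "andis. r7,r5,SLB_ESID_V@h" tests */

/* Simplified stand-in for one save area in the SLB shadow buffer. */
struct slb_save_area {
	uint64_t esid;			/* stored big-endian */
	uint64_t vsid;
};

/* Models LDX_BE assuming a little-endian host; on big-endian it is a plain load. */
static uint64_t load_be64(const uint64_t *p)
{
	return __builtin_bswap64(*p);
}

/* Stand-in for the slbmte instruction: just report what would be installed. */
static void slbmte(uint64_t vsid, uint64_t esid)
{
	printf("slbmte vsid=%#llx esid=%#llx\n",
	       (unsigned long long)vsid, (unsigned long long)esid);
}

static void load_host_slb(const struct slb_save_area *shadow)
{
	for (int i = 0; i < SLB_NUM_BOLTED; i++) {
		uint64_t esid = load_be64(&shadow[i].esid);
		uint64_t vsid = load_be64(&shadow[i].vsid);

		if (!(esid & SLB_ESID_V))
			continue;	/* the asm's "beq 1f": skip empty save areas */
		slbmte(vsid, esid);	/* RS = VSID data, RB = ESID with valid bit and index */
	}
}

int main(void)
{
	/* Hypothetical shadow contents: one valid bolted entry, one empty slot. */
	struct slb_save_area shadow[SLB_NUM_BOLTED] = {
		{ .esid = __builtin_bswap64(0xc000000000000000ULL | SLB_ESID_V),
		  .vsid = __builtin_bswap64(0x1000) },
		{ 0, 0 },
	};

	load_host_slb(shadow);
	return 0;
}

Moving that walk above the guest_bypass: label means it now runs only on the path where the SLB really was cleared and loaded with guest values on entry.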