[1/8] KVM: arm64: Move AArch32 exceptions over to AArch64 sysregs

Message ID: 20201102191609.265711-2-maz@kernel.org (mailing list archive)
State: New, archived
Series: KVM: arm64: Kill the copro array

Commit Message

Marc Zyngier Nov. 2, 2020, 7:16 p.m. UTC
The use of the AArch32-specific accessors has always been a bit
annoying on 64bit, and it is time for a change.

Let's move the AArch32 exception injection over to the AArch64 encoding,
which requires us to split the two halves of FAR_EL1 into DFAR and IFAR.
This enables us to drop the preempt_disable() games on VHE, and to kill
the last user of the vcpu_cp15() macro.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h |  1 -
 arch/arm64/kvm/inject_fault.c     | 62 ++++++++++---------------------
 2 files changed, 20 insertions(+), 43 deletions(-)
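
For background, the AArch32 DFAR and IFAR registers are architecturally
mapped onto the two halves of the AArch64 FAR_EL1 (DFAR is FAR_EL1[31:0],
IFAR is FAR_EL1[63:32]); that mapping is the split the patch implements.
A minimal standalone sketch of the mapping (illustrative helpers, not
part of the patch):

#include <stdint.h>

/* AArch32 DFAR is architecturally FAR_EL1[31:0]... */
static inline uint32_t far_el1_to_dfar(uint64_t far_el1)
{
	return (uint32_t)(far_el1 & 0xffffffffULL);
}

/* ...and AArch32 IFAR is FAR_EL1[63:32]. */
static inline uint32_t far_el1_to_ifar(uint64_t far_el1)
{
	return (uint32_t)(far_el1 >> 32);
}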

Comments

James Morse Nov. 3, 2020, 6:29 p.m. UTC | #1
Hi Marc,

On 02/11/2020 19:16, Marc Zyngier wrote:
> The use of the AArch32-specific accessors has always been a bit
> annoying on 64bit, and it is time for a change.
> 
> Let's move the AArch32 exception injection over to the AArch64 encoding,
> which requires us to split the two halves of FAR_EL1 into DFAR and IFAR.
> This enables us to drop the preempt_disable() games on VHE, and to kill
> the last user of the vcpu_cp15() macro.

Hurrah!


> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index e2a2e48ca371..975f65ba6a8b 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -100,39 +81,36 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
>   * Modelled after TakeDataAbortException() and TakePrefetchAbortException
>   * pseudocode.
>   */
> -static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
> -			 unsigned long addr)
> +static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
>  {
> -	u32 *far, *fsr;
> -	bool is_lpae;
> -	bool loaded;
> +	u64 far;
> +	u32 fsr;


> +	/* Give the guest an IMPLEMENTATION DEFINED exception */
> +	if (__vcpu_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {

With VHE, won't __vcpu_sys_reg() read the potentially stale copy in the sys_reg array?

vcpu_read_sys_reg()?
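
(On VHE the guest's EL1 system registers can be resident in hardware
while the vcpu runs, and are only written back to the in-memory array by
kvm_arch_vcpu_put(), so reading the array directly can return a stale
value. A simplified sketch of the accessor pattern, where
sketch_read_from_cpu() is a hypothetical helper, not the kernel
implementation:)

/*
 * Sketch only: prefer the live hardware copy when the sysregs are
 * loaded on the CPU, and fall back to the memory-backed array.
 */
u64 sketch_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
{
	u64 val;

	/* hypothetical: returns true if 'reg' could be read from HW */
	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    sketch_read_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}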


> +		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
> +	} else {
> +		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
> +		fsr = DFSR_FSC_EXTABT_nLPAE;
> +	}
>  
> -	loaded = pre_fault_synchronize(vcpu);
> +	far = vcpu_read_sys_reg(vcpu, FAR_EL1);


Thanks,

James
Marc Zyngier Nov. 10, 2020, 10:01 a.m. UTC | #2
On 2020-11-03 18:29, James Morse wrote:
> Hi Marc,
> 
> On 02/11/2020 19:16, Marc Zyngier wrote:
>> The use of the AArch32-specific accessors has always been a bit
>> annoying on 64bit, and it is time for a change.
>>
>> Let's move the AArch32 exception injection over to the AArch64 encoding,
>> which requires us to split the two halves of FAR_EL1 into DFAR and IFAR.
>> This enables us to drop the preempt_disable() games on VHE, and to kill
>> the last user of the vcpu_cp15() macro.
> 
> Hurrah!
> 
> 
>> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
>> index e2a2e48ca371..975f65ba6a8b 100644
>> --- a/arch/arm64/kvm/inject_fault.c
>> +++ b/arch/arm64/kvm/inject_fault.c
>> @@ -100,39 +81,36 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
>>   * Modelled after TakeDataAbortException() and TakePrefetchAbortException
>>   * pseudocode.
>>   */
>> -static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>> -			 unsigned long addr)
>> +static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
>>  {
>> -	u32 *far, *fsr;
>> -	bool is_lpae;
>> -	bool loaded;
>> +	u64 far;
>> +	u32 fsr;
> 
> 
>> +	/* Give the guest an IMPLEMENTATION DEFINED exception */
>> +	if (__vcpu_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
> 
> With VHE, won't __vcpu_sys_reg() read the potentially stale copy in
> the sys_reg array?
> 
> vcpu_read_sys_reg()?

Of course you are right. Now fixed.
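
(Per James's suggestion, the respin presumably ends up along these
lines; the exact v2 hunk is not reproduced here:

-	if (__vcpu_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
+	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
)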

Thanks,

         M.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7a1faf917f3c..a6778c39157d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -561,7 +561,6 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 #define CPx_BIAS		IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
 
 #define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
-#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 
 struct kvm_vm_stat {
 	ulong remote_tlb_flush;
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index e2a2e48ca371..975f65ba6a8b 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -69,26 +69,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 #define DFSR_FSC_EXTABT_LPAE	0x10
 #define DFSR_FSC_EXTABT_nLPAE	0x08
 #define DFSR_LPAE		BIT(9)
-
-static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	if (vcpu->arch.sysregs_loaded_on_cpu) {
-		kvm_arch_vcpu_put(vcpu);
-		return true;
-	}
-
-	preempt_enable();
-	return false;
-}
-
-static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
-{
-	if (loaded) {
-		kvm_arch_vcpu_load(vcpu, smp_processor_id());
-		preempt_enable();
-	}
-}
+#define TTBCR_EAE		BIT(31)
 
 static void inject_undef32(struct kvm_vcpu *vcpu)
 {
@@ -100,39 +81,36 @@ static void inject_undef32(struct kvm_vcpu *vcpu)
  * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  * pseudocode.
  */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-			 unsigned long addr)
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 {
-	u32 *far, *fsr;
-	bool is_lpae;
-	bool loaded;
+	u64 far;
+	u32 fsr;
+
+	/* Give the guest an IMPLEMENTATION DEFINED exception */
+	if (__vcpu_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
+		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
+	} else {
+		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
+		fsr = DFSR_FSC_EXTABT_nLPAE;
+	}
 
-	loaded = pre_fault_synchronize(vcpu);
+	far = vcpu_read_sys_reg(vcpu, FAR_EL1);
 
 	if (is_pabt) {
 		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_IABT |
 				     KVM_ARM64_PENDING_EXCEPTION);
-		far = &vcpu_cp15(vcpu, c6_IFAR);
-		fsr = &vcpu_cp15(vcpu, c5_IFSR);
+		far &= GENMASK(31, 0);
+		far |= (u64)addr << 32;
+		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
 	} else { /* !iabt */
 		vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA32_DABT |
 				     KVM_ARM64_PENDING_EXCEPTION);
-		far = &vcpu_cp15(vcpu, c6_DFAR);
-		fsr = &vcpu_cp15(vcpu, c5_DFSR);
-	}
-
-	*far = addr;
-
-	/* Give the guest an IMPLEMENTATION DEFINED exception */
-	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae) {
-		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
-	} else {
-		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
-		*fsr = DFSR_FSC_EXTABT_nLPAE;
+		far &= GENMASK(63, 32);
+		far |= addr;
+		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
 	}
 
-	post_fault_synchronize(vcpu, loaded);
+	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
 /**
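
Taken together, the reworked inject_abt32() derives everything from the
AArch64 view of the guest: TTBCR.EAE (bit 31 of TCR_EL1) selects the
LPAE or short-descriptor FSR format, and the fault address is packed
into the appropriate half of FAR_EL1 without disturbing the other half.
A standalone C sketch of that selection and packing, reusing the
constants from the patch (illustrative, not the kernel code):

#include <stdint.h>

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		(1u << 9)
#define TTBCR_EAE		(1u << 31)

struct abt32_state {
	uint64_t far;	/* IFAR in bits [63:32], DFAR in bits [31:0] */
	uint32_t fsr;	/* destined for IFSR32_EL2 or ESR_EL1 */
};

static struct abt32_state sketch_abt32(uint64_t far, uint32_t tcr,
				       int is_pabt, uint32_t addr)
{
	struct abt32_state st;

	/* LPAE guests (TTBCR.EAE set) take the long-descriptor format */
	if (tcr & TTBCR_EAE)
		st.fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	else
		st.fsr = DFSR_FSC_EXTABT_nLPAE;

	if (is_pabt) {
		/* prefetch abort: update the IFAR half, keep DFAR */
		far &= 0x00000000ffffffffULL;
		far |= (uint64_t)addr << 32;
	} else {
		/* data abort: update the DFAR half, keep IFAR */
		far &= 0xffffffff00000000ULL;
		far |= addr;
	}
	st.far = far;

	return st;
}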