[07/12] KVM: arm64: nv: Add basic emulation of AT S1E2{R,W}

Message ID 20240625133508.259829-8-maz@kernel.org (mailing list archive)
State New
Series KVM: arm64: nv: Add support for address translation instructions

Commit Message

Marc Zyngier June 25, 2024, 1:35 p.m. UTC
Similar to our AT S1E{0,1} emulation, we implement the AT S1E2
handling.

This emulation of course suffers from the same problems, but is
somewhat simpler due to the lack of PAN2 and the fact that we are
guaranteed to execute it from the correct context.

Co-developed-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_asm.h |  1 +
 arch/arm64/kvm/at.c              | 57 ++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)

Patch

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 9b6c9f4f4d885..6ec0622969766 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -237,6 +237,7 @@ extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index eb0aa49e61f68..147df5a9cc4e0 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -195,3 +195,60 @@ void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
 
 	write_unlock(&vcpu->kvm->mmu_lock);
 }
+
+void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
+{
+	struct kvm_s2_mmu *mmu;
+	unsigned long flags;
+	u64 val, hcr, par;
+	bool fail;
+
+	write_lock(&vcpu->kvm->mmu_lock);
+
+	mmu = &vcpu->kvm->arch.mmu;
+
+	/*
+	 * We've trapped, so everything is live on the CPU. As we will
+	 * be switching context behind everybody's back, disable
+	 * interrupts...
+	 */
+	local_irq_save(flags);
+
+	val = hcr = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	val |= HCR_VM;
+
+	if (!vcpu_el2_e2h_is_set(vcpu))
+		val |= HCR_NV | HCR_NV1;
+
+	write_sysreg(val, hcr_el2);
+	isb();
+
+	switch (op) {
+	case OP_AT_S1E2R:
+		fail = __kvm_at(OP_AT_S1E1R, vaddr);
+		break;
+	case OP_AT_S1E2W:
+		fail = __kvm_at(OP_AT_S1E1W, vaddr);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		fail = true;
+	}
+
+	isb();
+
+	if (!fail)
+		par = read_sysreg_par();
+	else
+		par = SYS_PAR_EL1_F;
+
+	write_sysreg(hcr, hcr_el2);
+	isb();
+
+	local_irq_restore(flags);
+
+	write_unlock(&vcpu->kvm->mmu_lock);
+
+	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
+}
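
For context (not part of this patch): a minimal sketch of how the EL2 sysreg
trap path could route a guest hypervisor's AT S1E2{R,W} into the new helper.
The handler name and the way struct sys_reg_params and sys_insn() are used
here are illustrative assumptions modelled on KVM's existing sysreg handling,
not code taken from this series.

/*
 * Illustrative only: a possible shape for the trap handler that
 * forwards AT S1E2{R,W} executed at vEL2 to __kvm_at_s1e2().
 */
static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	/* Rebuild the AT encoding (OP_AT_S1E2R or OP_AT_S1E2W) from the trap */
	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);

	/* p->regval holds the Xt operand, i.e. the VA to translate */
	__kvm_at_s1e2(vcpu, op, p->regval);

	return true;
}

Returning true completes the emulation of the trapped instruction; the result
becomes visible to the guest through the PAR_EL1 update performed by the
vcpu_write_sys_reg() call at the end of __kvm_at_s1e2().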