Message ID | 20240926032244.3666579-2-shahuang@redhat.com
---|---
State | New, archived
Series | Allow the RAS feature bit in ID_AA64PFR0_EL1 writable from userspace
On Wed, Sep 25, 2024 at 11:22:39PM -0400, Shaoqin Huang wrote:
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index d7c2990e7c9e..99f256629ead 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -405,7 +405,7 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
>  void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
>  {
>          if (ARM_SERROR_PENDING(exception_index)) {
> -                if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
> +                if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
>                          u64 disr = kvm_vcpu_get_disr(vcpu);
>
>                          kvm_handle_guest_serror(vcpu, disr_to_esr(disr));

This is wrong; this is about handling *physical* SErrors, not virtual
ones. So it really ought to be keyed off the host cpucap.

> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index 37ff87d782b6..bf176a3cc594 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -272,7 +272,7 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
>
>          write_sysreg(hcr, hcr_el2);
>
> -        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
> +        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP) && (hcr & HCR_VSE))
>                  write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
>  }

I don't think this should be conditioned on guest visibility either. If
FEAT_RAS is implemented in hardware, ESR_EL1 is set to the value of
VSESR_EL2 when the vSError is taken, no matter what.

> diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
> index 4c0fdabaf8ae..98526556d4e5 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
> @@ -105,6 +105,8 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
>
>  static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
>  {
> +        struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
> +
>          ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
>          /*
>           * Guest PSTATE gets saved at guest fixup time in all
> @@ -113,7 +115,7 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
>          if (!has_vhe() && ctxt->__hyp_running_vcpu)
>                  ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
>
> -        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
> +        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP))
>                  ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
>  }
>
> @@ -220,6 +222,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
>  {
>          u64 pstate = to_hw_pstate(ctxt);
>          u64 mode = pstate & PSR_AA32_MODE_MASK;
> +        struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
>
>          /*
>           * Safety check to ensure we're setting the CPU up to enter the guest
> @@ -238,7 +241,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
>          write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
>          write_sysreg_el2(pstate, SYS_SPSR);
>
> -        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
> +        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP))
>                  write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
>  }

These registers are still stateful no matter what; we cannot prevent an
ESB instruction inside the VM from consuming a pending vSError. Keep in
mind the ESB instruction is a NOP without FEAT_RAS, so it is still a
legal instruction for a VM w/o FEAT_RAS.
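A sketch of the shape argued for above, reconstructed from the quoted
pre-patch ("-") lines: both paths stay keyed to the host capability,
because they describe what the hardware does, not what the guest is
shown. Illustrative only, not a tested revision of the patch.

/* arch/arm64/kvm/handle_exit.c: a *physical* SError taken while the
 * guest was running, so the check is about the hardware's RAS support. */
if (ARM_SERROR_PENDING(exception_index)) {
        if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {     /* host cap, not guest */
                u64 disr = kvm_vcpu_get_disr(vcpu);

                kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
        }
}

/* arch/arm64/kvm/hyp/include/hyp/switch.h: when the hardware has
 * FEAT_RAS, taking the vSError copies VSESR_EL2 into ESR_EL1 regardless
 * of the guest's ID registers, so VSESR_EL2 must be programmed whenever
 * a vSError is pending. */
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
        write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);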
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 31e49da867ff..b09f8ba3525b 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -4513,7 +4513,7 @@ static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
>
>          if (has_vhe() || has_hvhe())
>                  vcpu->arch.hcr_el2 |= HCR_E2H;
> -        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
> +        if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
>                  /* route synchronous external abort exceptions to EL2 */
>                  vcpu->arch.hcr_el2 |= HCR_TEA;
>                  /* trap error record accesses */

No, we want external aborts to be taken to EL2. Wouldn't this also have
the interesting property of allowing a VM w/o FEAT_RAS to access the
error record registers?
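Concretely, keeping the host capability check here preserves both trap
behaviours the review relies on. A sketch from the pre-patch lines; the
HCR_TERR assignment is the statement that follows the quoted context in
mainline:

/* arch/arm64/kvm/sys_regs.c */
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
        /* route synchronous external abort exceptions to EL2 */
        vcpu->arch.hcr_el2 |= HCR_TEA;
        /* trap error record accesses */
        vcpu->arch.hcr_el2 |= HCR_TERR;
}

With the guest-keyed check, a VM without FEAT_RAS on RAS-capable
hardware would run with HCR_EL2.TERR clear, so its ERX* accesses would
reach the real hardware error records instead of trapping, which is the
property the review is pointing at.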
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 11098eb7eb44..938e3cd05d1e 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -819,7 +819,7 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                               struct kvm_vcpu_events *events)
 {
         events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
-        events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
+        events->exception.serror_has_esr = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP);

         if (events->exception.serror_pending && events->exception.serror_has_esr)
                 events->exception.serror_esr = vcpu_get_vsesr(vcpu);
@@ -841,7 +841,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
         bool ext_dabt_pending = events->exception.ext_dabt_pending;

         if (serror_pending && has_esr) {
-                if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+                if (!kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP))
                         return -EINVAL;

                 if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index d7c2990e7c9e..99f256629ead 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -405,7 +405,7 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 {
         if (ARM_SERROR_PENDING(exception_index)) {
-                if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
+                if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
                         u64 disr = kvm_vcpu_get_disr(vcpu);

                         kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 37ff87d782b6..bf176a3cc594 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -272,7 +272,7 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)

         write_sysreg(hcr, hcr_el2);

-        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP) && (hcr & HCR_VSE))
                 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 }

diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 4c0fdabaf8ae..98526556d4e5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -105,6 +105,8 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)

 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
+        struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
+
         ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
         /*
          * Guest PSTATE gets saved at guest fixup time in all
@@ -113,7 +115,7 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
         if (!has_vhe() && ctxt->__hyp_running_vcpu)
                 ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

-        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP))
                 ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
 }

@@ -220,6 +222,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
 {
         u64 pstate = to_hw_pstate(ctxt);
         u64 mode = pstate & PSR_AA32_MODE_MASK;
+        struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);

         /*
          * Safety check to ensure we're setting the CPU up to enter the guest
@@ -238,7 +241,7 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctx
         write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
         write_sysreg_el2(pstate, SYS_SPSR);

-        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, RAS, IMP))
                 write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
 }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 31e49da867ff..b09f8ba3525b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -4513,7 +4513,7 @@ static void vcpu_set_hcr(struct kvm_vcpu *vcpu)

         if (has_vhe() || has_hvhe())
                 vcpu->arch.hcr_el2 |= HCR_E2H;
-        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
+        if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
                 /* route synchronous external abort exceptions to EL2 */
                 vcpu->arch.hcr_el2 |= HCR_TEA;
                 /* trap error record accesses */
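The behavioural difference between the two predicates in this patch, as
a simplified model: cpus_have_final_cap() is fixed at boot and describes
the hardware, while kvm_has_feat() reads the VM's ID register image,
which userspace may have narrowed. The body below is illustrative, not
the kernel's actual macro; kvm_read_vm_id_reg() is assumed to be the
VM-scoped ID register accessor.

/* Model of kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP): extract the
 * RAS field from the VM's (possibly userspace-written) ID_AA64PFR0_EL1
 * and compare it against the required floor. */
static bool vm_has_ras(struct kvm *kvm)
{
        u64 pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);

        return SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) >=
               ID_AA64PFR0_EL1_RAS_IMP;
}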
Use kvm_has_feat() to check whether FEAT_RAS is advertised to the
guest; this is needed once the RAS field in ID_AA64PFR0_EL1 becomes
writable from userspace.

Signed-off-by: Shaoqin Huang <shahuang@redhat.com>
---
 arch/arm64/kvm/guest.c                     | 4 ++--
 arch/arm64/kvm/handle_exit.c               | 2 +-
 arch/arm64/kvm/hyp/include/hyp/switch.h    | 2 +-
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 7 +++++--
 arch/arm64/kvm/sys_regs.c                  | 2 +-
 5 files changed, 10 insertions(+), 7 deletions(-)
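For context on the writable part: with the series applied, a VMM could
hide FEAT_RAS from a guest roughly as follows. This is a sketch using
the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG UAPI; hide_ras() and the
PFR0_* macros are illustrative names, and the write is only accepted
once KVM actually treats the RAS field as writable.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR0_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=0;
 * the RAS field occupies bits [31:28]. */
#define PFR0_ID         ARM64_SYS_REG(3, 0, 0, 4, 0)
#define PFR0_RAS        (0xfULL << 28)

/* Clear the RAS field so the guest sees FEAT_RAS as not implemented.
 * vcpu_fd is assumed to be an initialised vCPU file descriptor. */
static int hide_ras(int vcpu_fd)
{
        uint64_t pfr0;
        struct kvm_one_reg reg = {
                .id   = PFR0_ID,
                .addr = (uint64_t)&pfr0,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                return -1;

        pfr0 &= ~PFR0_RAS;      /* RAS = 0: not implemented */

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Note that KVM only accepts writes to the ID registers before the vCPU
first runs; after that the values are frozen.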