@@ -41,7 +41,17 @@ static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
return vcpu_reg(vcpu, reg_num);
}
-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
+
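+/*
+ * The banked SPSR always lives in memory on 32-bit arm; these wrappers
+ * simply mirror the arm64 vcpu_read_spsr/vcpu_write_spsr interface.
+ */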
+static inline unsigned long vcpu_read_spsr(struct kvm_vcpu *vcpu)
+{
+ return *__vcpu_spsr(vcpu);
+}
+
+static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
+{
+ *__vcpu_spsr(vcpu) = v;
+}
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
@@ -142,7 +142,7 @@ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
/*
* Return the SPSR for the current mode of the virtual CPU.
*/
-unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
@@ -26,6 +26,7 @@
#include <asm/esr.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
@@ -143,13 +144,43 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
-/* Get vcpu SPSR for current mode */
-static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
- if (vcpu_mode_is_32bit(vcpu))
- return vcpu_spsr32(vcpu);
+ unsigned long *p = (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+
+ if (vcpu_mode_is_32bit(vcpu)) {
+ unsigned long *p_32bit = vcpu_spsr32(vcpu);
+
+ /* KVM_SPSR_SVC aliases KVM_SPSR_EL1 */
+ if (p_32bit != p)
+ return *p_32bit;
+ }
+
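+	/* SPSR_EL1 may be loaded on the physical CPU rather than held in memory */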
+ if (vcpu->arch.sysregs_loaded_on_cpu)
+ return read_sysreg_el1(spsr);
+ else
+ return *p;
+}
- return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
+{
+ unsigned long *p = (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+
+ if (vcpu_mode_is_32bit(vcpu)) {
+ unsigned long *p_32bit = vcpu_spsr32(vcpu);
+
+ /* KVM_SPSR_SVC aliases KVM_SPSR_EL1 */
+ if (p_32bit != p) {
+ *p_32bit = v;
+ return;
+ }
+ }
+
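+	/* As for reads, SPSR_EL1 may currently be loaded on the physical CPU */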
+ if (vcpu->arch.sysregs_loaded_on_cpu)
+ write_sysreg_el1(v, spsr);
+ else
+ *p = v;
}
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
@@ -71,7 +71,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
- *vcpu_spsr(vcpu) = cpsr;
+ vcpu_write_spsr(vcpu, cpsr);
vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
@@ -106,7 +106,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
- *vcpu_spsr(vcpu) = cpsr;
+ vcpu_write_spsr(vcpu, cpsr);
/*
* Build an unknown exception, depending on the instruction
@@ -178,7 +178,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_cpsr(vcpu) = cpsr;
/* Note: These now point to the banked copies */
- *vcpu_spsr(vcpu) = new_spsr_value;
+ vcpu_write_spsr(vcpu, new_spsr_value);
*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */