@@ -102,7 +102,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
BUG_ON(!p->is_write);
- val = *vcpu_reg(vcpu, p->Rt);
+ val = *p->val;
if (!p->is_aarch32) {
vcpu_sys_reg(vcpu, r->reg) = val;
} else {
@@ -125,13 +125,10 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 val;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- val = *vcpu_reg(vcpu, p->Rt);
- vgic_v3_dispatch_sgi(vcpu, val);
+ vgic_v3_dispatch_sgi(vcpu, *p->val);
return true;
}
@@ -153,7 +150,7 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
if (p->is_write) {
return ignore_write(vcpu, p);
} else {
- *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+ *p->val = (1 << 3);
return true;
}
}
@@ -167,7 +164,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
} else {
u32 val;
asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
- *vcpu_reg(vcpu, p->Rt) = val;
+ *p->val = val;
return true;
}
}
@@ -204,10 +201,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
if (p->is_write) {
- vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu_sys_reg(vcpu, r->reg) = *p->val;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
- *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ *p->val = vcpu_sys_reg(vcpu, r->reg);
}
- trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+ trace_trap_reg(__func__, r->reg, p->is_write, *p->val);
@@ -704,10 +701,10 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
u32 el3 = !!((pfr >> 12) & 0xf);
- *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
- (((dfr >> 12) & 0xf) << 24) |
- (((dfr >> 28) & 0xf) << 20) |
- (6 << 16) | (el3 << 14) | (el3 << 12));
+ *p->val = ((((dfr >> 20) & 0xf) << 28) |
+ (((dfr >> 12) & 0xf) << 24) |
+ (((dfr >> 28) & 0xf) << 20) |
+ (6 << 16) | (el3 << 14) | (el3 << 12));
return true;
}
}
@@ -717,10 +714,10 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
if (p->is_write) {
- vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu_cp14(vcpu, r->reg) = *p->val;
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
} else {
- *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+ *p->val = vcpu_cp14(vcpu, r->reg);
}
return true;
@@ -1069,12 +1066,14 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int Rt = (hsr >> 5) & 0xf;
int Rt2 = (hsr >> 10) & 0xf;
+ unsigned long val;
params.is_aarch32 = true;
params.is_32bit = false;
params.CRm = (hsr >> 1) & 0xf;
- params.Rt = (hsr >> 5) & 0xf;
+ params.val = &val;
params.is_write = ((hsr & 1) == 0);
params.Op0 = 0;
@@ -1083,15 +1082,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
params.CRn = 0;
/*
- * Massive hack here. Store Rt2 in the top 32bits so we only
- * have one register to deal with. As we use the same trap
+ * Make a 64-bit value out of Rt and Rt2. As we use the same trap
* backends between AArch32 and AArch64, we get away with it.
*/
if (params.is_write) {
- u64 val = *vcpu_reg(vcpu, params.Rt);
- val &= 0xffffffff;
+ val = *vcpu_reg(vcpu, Rt) & 0xffffffff;
val |= *vcpu_reg(vcpu, Rt2) << 32;
- *vcpu_reg(vcpu, params.Rt) = val;
}
if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1102,11 +1098,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
unhandled_cp_access(vcpu, &params);
out:
- /* Do the opposite hack for the read side */
+ /* Split up the value between registers for the read side */
if (!params.is_write) {
- u64 val = *vcpu_reg(vcpu, params.Rt);
- val >>= 32;
- *vcpu_reg(vcpu, Rt2) = val;
+ *vcpu_reg(vcpu, Rt) = val & 0xffffffff;
+ *vcpu_reg(vcpu, Rt2) = val >> 32;
}
return 1;
@@ -1125,11 +1120,12 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int Rt = (hsr >> 5) & 0xf;
params.is_aarch32 = true;
params.is_32bit = true;
params.CRm = (hsr >> 1) & 0xf;
- params.Rt = (hsr >> 5) & 0xf;
+ params.val = vcpu_reg(vcpu, Rt);
params.is_write = ((hsr & 1) == 0);
params.CRn = (hsr >> 10) & 0xf;
params.Op0 = 0;
@@ -1237,6 +1233,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ int Rt = (esr >> 5) & 0x1f;
trace_kvm_handle_sys_reg(esr);
@@ -1247,7 +1244,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.CRn = (esr >> 10) & 0xf;
params.CRm = (esr >> 1) & 0xf;
params.Op2 = (esr >> 17) & 0x7;
- params.Rt = (esr >> 5) & 0x1f;
+ params.val = vcpu_reg(vcpu, Rt);
params.is_write = !(esr & 1);
return emulate_sys_reg(vcpu, &params);
@@ -28,7 +28,7 @@ struct sys_reg_params {
u8 CRn;
u8 CRm;
u8 Op2;
- u8 Rt;
+ unsigned long *val;
bool is_write;
bool is_aarch32;
bool is_32bit; /* Only valid if is_aarch32 is true */
@@ -79,7 +79,7 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
static inline bool read_zero(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p)
{
- *vcpu_reg(vcpu, p->Rt) = 0;
+ *p->val = 0;
return true;
}
@@ -37,7 +37,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
if (p->is_write)
return ignore_write(vcpu, p);
- *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+ *p->val = vcpu_sys_reg(vcpu, ACTLR_EL1);
return true;
}
Replace Rt with a data pointer in struct sys_reg_params. This will allow
the system register handling code to be reused in the implementation of
the vGICv3 CPU interface access API. Additionally, this gets rid of the
"massive hack" in kvm_handle_cp_64().

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
---
 arch/arm64/kvm/sys_regs.c            | 51 +++++++++++++++++-------------------
 arch/arm64/kvm/sys_regs.h            |  4 +--
 arch/arm64/kvm/sys_regs_generic_v8.c |  2 +-
 3 files changed, 27 insertions(+), 30 deletions(-)
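
For readers unfamiliar with this code, below is a minimal userspace sketch
of the calling convention the patch establishes. It is an illustration
only: fake_vcpu, trap_example and emulated_reg are hypothetical stand-ins,
not kernel symbols. It shows how a handler moves data through p->val
without knowing which GPR (or local temporary) backs it, and how the
caller can aim p->val at a scratch variable -- which is exactly how the
reworked kvm_handle_cp_64() replaces the old Rt/Rt2 hack.

/*
 * Standalone sketch, not kernel code. Assumes an LP64 target (as on
 * arm64) so that unsigned long is 64 bits wide.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_vcpu {
	unsigned long regs[31];		/* stand-in for the guest GPR file */
};

struct sys_reg_params {
	unsigned long *val;		/* data moved by the trapped access */
	bool is_write;
};

/* New-style handler: it only ever sees a pointer, never an Rt index. */
static bool trap_example(unsigned long *backing, struct sys_reg_params *p)
{
	if (p->is_write)
		*backing = *p->val;	/* guest register -> emulated state */
	else
		*p->val = *backing;	/* emulated state -> guest register */
	return true;
}

int main(void)
{
	struct fake_vcpu vcpu = { .regs = { [5] = 0xabcd, [6] = 0x1234 } };
	unsigned long emulated_reg = 0;
	unsigned long val;
	int Rt = 5, Rt2 = 6;		/* as decoded from the syndrome */
	struct sys_reg_params p = { .is_write = true };

	/* Plain access: point val straight at the guest GPR. */
	p.val = &vcpu.regs[Rt];
	trap_example(&emulated_reg, &p);
	printf("after plain write:  %#lx\n", emulated_reg);	/* 0xabcd */

	/*
	 * 64-bit AArch32 access: build the value in a local, as the
	 * reworked kvm_handle_cp_64() now does, instead of stuffing
	 * Rt2's half into the Rt register itself.
	 */
	val = (vcpu.regs[Rt] & 0xffffffff) | (vcpu.regs[Rt2] << 32);
	p.val = &val;
	trap_example(&emulated_reg, &p);
	printf("after 64-bit write: %#lx\n", emulated_reg);	/* 0x12340000abcd */
	return 0;
}

The payoff of this design is that handlers stay oblivious to register
banking and width, so the same trap backends can serve AArch32, AArch64
and, per the commit message, later userspace access to the vGICv3 CPU
interface registers.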