@@ -231,13 +231,14 @@ static void do_nothing(void *info)
* Fortunately this is so rare (we don't usually need the instruction) that we
* can go very slowly and no one will mind.
*/
-static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
+static int copy_from_guest(struct kvm_vcpu *vcpu,
+ void *dest,
+ unsigned long gva,
+ size_t len)
{
int i;
bool ret;
struct kvm_vcpu *v;
- bool is_thumb;
- size_t instr_len;
/* Don't cross with IPIs in kvm_main.c */
spin_lock(&vcpu->kvm->mmu_lock);
@@ -256,33 +257,16 @@ static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
smp_call_function_single(v->cpu, do_nothing, NULL, 1);
}
-
- is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
- instr_len = (is_thumb) ? 2 : 4;
-
- BUG_ON(!is_thumb && *vcpu_pc(vcpu) & 0x3);
-
/* Now guest isn't running, we can va->pa map and copy atomically. */
- ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu), instr_len,
- vcpu_mode_priv(vcpu));
- if (!ret)
- goto out;
-
- /* A 32-bit thumb2 instruction can actually go over a page boundary! */
- if (is_thumb && is_wide_instruction(*instr)) {
- *instr = *instr << 16;
- ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu) + 2, 2,
- vcpu_mode_priv(vcpu));
- }
+ ret = copy_from_guest_va(vcpu, dest, gva, len, vcpu_mode_priv(vcpu));
-out:
/* Release them all. */
kvm_for_each_vcpu(i, v, vcpu->kvm)
v->arch.pause = false;
spin_unlock(&vcpu->kvm->mmu_lock);
- return ret;
+ return ret ? 0 : -EFAULT;
}
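The locking dance above only guarantees that nothing runs guest code while we read; the translation and copy themselves live in copy_from_guest_va(). A minimal sketch of what that helper has to do, assuming a hypothetical gva_to_gpa_priv() stage-1 table walker (the real walker is not part of this patch). Note the callers keep each read within one page, so a single translation suffices:

	static bool copy_from_guest_va(struct kvm_vcpu *vcpu, void *dest,
				       unsigned long gva, size_t len, bool priv)
	{
		gpa_t gpa;

		/* Walk the guest's stage-1 tables (hypothetical helper). */
		if (!gva_to_gpa_priv(vcpu, gva, &gpa, priv))
			return false;

		/* kvm_read_guest() reads through the guest physical map. */
		return kvm_read_guest(vcpu->kvm, gpa, dest, len) == 0;
	}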
/******************************************************************************
@@ -297,6 +281,9 @@ enum SRType {
};
struct arm_insn {
+ /* Encoded instruction. */
+ u32 instr;
+
/* Decoding for the register write back */
bool register_form;
u32 imm;
@@ -545,14 +532,16 @@ static const struct arm_decode arm_decode[] = {
};
static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu,
- u32 instr, struct arm_insn *ai)
+ unsigned long instr, struct arm_insn *ai)
{
int i;
for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
const struct arm_decode *d = &arm_decode[i];
if ((instr & d->opc_mask) == d->opc) {
- *ai = d->template;
+ ai->len = d->template.len;
+ ai->w = d->template.w;
+ ai->sign_extend = d->template.sign_extend;
return d->decode(vcpu, instr, ai);
}
}
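Each arm_decode entry pairs an opcode/mask pattern with a template and a decode callback, so the scan above hands the instruction to the first entry whose masked bits match. A hypothetical entry for the single data transfer class, with illustrative values rather than ones copied from the real table (decode_arm_wb is a made-up callback name):

	{
		.opc		= 0x04000000,	/* bits[27:26] == 01: LDR/STR class */
		.opc_mask	= 0x0c000000,
		.template	= { .len = 4, .w = false, .sign_extend = false },
		.decode		= decode_arm_wb,
	},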
@@ -678,7 +667,7 @@ static const struct thumb_decode thumb_decode[] = {
static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
- u32 instr, struct arm_insn *ti)
+ unsigned long instr, struct arm_insn *ti)
{
bool is16;
int i;
@@ -715,14 +704,38 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
return false;
}
-static int kvm_decode_ls(struct kvm_vcpu *vcpu, u32 instr, u32 psr,
- struct arm_insn *ai)
+static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
+ struct arm_insn *ai)
{
bool is_thumb = !!(psr & PSR_T_BIT);
+ unsigned int instr_len = is_thumb ? 2 : 4;
+ int err;
+
+ BUG_ON(!is_thumb && (pc & 0x3));
- if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, ai))
+ /* Zero out high bits for the thumb case, and so it's 0 on error. */
+ ai->instr = 0;
+
+ /* copy_from_guest() pauses all VCPUs, so the va->pa map and copy are atomic. */
+ err = copy_from_guest(vcpu, &ai->instr, pc, instr_len);
+ if (err)
+ return err;
+
+ /*
+ * Is it a 32-bit thumb instruction? We can't fetch it all in one go,
+ * since it can actually straddle a page boundary.
+ */
+ if (is_thumb && is_wide_instruction(ai->instr)) {
+ ai->instr = ai->instr << 16;
+ err = copy_from_guest(vcpu, &ai->instr,
+ pc + instr_len, instr_len);
+ if (err)
+ return err;
+ }
+
+ if (!is_thumb && !kvm_decode_arm_ls(vcpu, ai->instr, ai))
return -ENOENT;
- else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, ai))
+ else if (is_thumb && !kvm_decode_thumb_ls(vcpu, ai->instr, ai))
return -ENOENT;
return 0;
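The wide-instruction test relies on a property of the Thumb-2 encoding: a 32-bit instruction's first halfword has bits[15:11] set to 0b11101, 0b11110 or 0b11111, so it compares >= 0xe800 (see the ARM ARM, section A6.1). A sketch consistent with that rule, which the in-tree helper may phrase differently:

	static inline bool is_wide_instruction(u32 instr)
	{
		/* hw1 bits[15:11] in {0b11101, 0b11110, 0b11111} => 32-bit */
		return (instr & 0xffff) >= 0xe800;
	}

This is also why kvm_decode() shifts the first halfword up by 16 before fetching the second: the decoder tables expect the hw1:hw2 layout in a single u32.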
@@ -777,29 +790,25 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
bool is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
- unsigned long instr = 0;
struct arm_insn insn;
-
- trace_kvm_mmio_emulate(*vcpu_pc(vcpu), instr, *vcpu_cpsr(vcpu));
-
- /* If it fails (SMP race?), we reenter guest for it to retry. */
- if (!copy_current_insn(vcpu, &instr))
- return 1;
+ int err;
mmio->phys_addr = fault_ipa;
- if (kvm_decode_ls(vcpu, instr, *vcpu_cpsr(vcpu), &insn) != 0) {
- kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=%i)"
- "pc: %#08x)\n",
- instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
+ err = kvm_decode(vcpu, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu), &insn);
+ if (err) {
+ kvm_debug("Unable to decode inst: %#08x (cpsr: %#08x (T=%i)"
+ "pc: %#08x) - %i\n",
+ insn.instr, *vcpu_cpsr(vcpu), is_thumb,
+ *vcpu_pc(vcpu), err);
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
if (!execute(vcpu, mmio, &insn)) {
- kvm_debug("Unable to execute inst: %#08lx (cpsr: %#08x (T=%i)"
+ kvm_debug("Unable to execute inst: %#08x (cpsr: %#08x (T=%i)"
"pc: %#08x)\n",
- instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
+ insn.instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
@@ -808,7 +817,7 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* The MMIO instruction is emulated and should not be re-executed
* in the guest.
*/
- kvm_skip_instr(vcpu, is_wide_instruction(instr));
+ kvm_skip_instr(vcpu, insn.is_thumb32);
return 0;
}
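For reference, kvm_skip_instr() just advances the guest PC past the emulated access, using insn.is_thumb32 to choose the step size. A sketch of that logic, with IT-block state maintenance omitted:

	void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
	{
		bool is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);

		if (is_thumb && !is_wide_instr)
			*vcpu_pc(vcpu) += 2;	/* narrow Thumb encoding */
		else
			*vcpu_pc(vcpu) += 4;	/* ARM or wide Thumb-2 */
	}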