KVM_CAP_ARM_SYSTEM_SUSPEND allows VMMs to trap guest attempts to use
the PSCI SYSTEM_SUSPEND hypercall. Make use of that capability in KVM
tool to implement guest suspend support.

Add some minimal SMCCC register handling (params, return values) for
AArch32 and AArch64. Perform only the required sanity check before
suspending the VM by ensuring all other vCPUs besides the caller are
powered off. Leverage KVM_MP_STATE_SUSPENDED to emulate the suspend as
an architectural WFI.

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arm/aarch32/kvm-cpu.c                 | 72 ++++++++++++++++++++++++
 arm/aarch64/kvm-cpu.c                 | 66 ++++++++++++++++++++++
 arm/include/arm-common/kvm-cpu-arch.h |  5 ++
 arm/kvm-cpu.c                         | 81 +++++++++++++++++++++++++++
 arm/kvm.c                             |  9 +++
 5 files changed, 233 insertions(+)

diff --git a/arm/aarch32/kvm-cpu.c b/arm/aarch32/kvm-cpu.c
--- a/arm/aarch32/kvm-cpu.c
+++ b/arm/aarch32/kvm-cpu.c
@@ -130,3 +130,75 @@ void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
 		die("KVM_GET_ONE_REG failed (LR_svc)");
 	dprintf(debug_fd, " LR_svc: 0x%x\n", data);
 }
+
+u64 kvm_cpu__smccc_get_arg1(struct kvm_cpu *vcpu)
+{
+	struct kvm_one_reg reg;
+	u32 data;
+
+	reg.addr = (u64)&data;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r1);
+	if (ioctl(vcpu->vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
+		die("KVM_GET_ONE_REG failed (r1)");
+
+	return data;
+}
+
+u64 kvm_cpu__smccc_get_arg2(struct kvm_cpu *vcpu)
+{
+	struct kvm_one_reg reg;
+	u32 data;
+
+	reg.addr = (u64)&data;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r2);
+	if (ioctl(vcpu->vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
+		die("KVM_GET_ONE_REG failed (r2)");
+
+	return data;
+}
+
+void kvm_cpu__smccc_return(struct kvm_cpu *vcpu, u64 a0, u64 a1, u64 a2, u64 a3)
+{
+	struct kvm_one_reg reg;
+	u32 data;
+
+	reg.addr = (u64)&data;
+
+	data = (u32)a0;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r0);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (r0)");
+
+	data = (u32)a1;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r1);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (r1)");
+
+	data = (u32)a2;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r2);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (r2)");
+
+	data = (u32)a3;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r3);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (r3)");
+}
+
+void kvm_cpu__psci_set_entry(struct kvm_cpu *vcpu, u64 entry_addr, u64 context_id)
+{
+	struct kvm_one_reg reg;
+	u32 data;
+
+	reg.addr = (u64)&data;
+
+	data = (u32)entry_addr;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_pc);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (pc)");
+
+	data = (u32)context_id;
+	reg.id = ARM_CORE_REG(usr_regs.ARM_r0);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (r0)");
+}
diff --git a/arm/aarch64/kvm-cpu.c b/arm/aarch64/kvm-cpu.c
--- a/arm/aarch64/kvm-cpu.c
+++ b/arm/aarch64/kvm-cpu.c
@@ -254,3 +254,69 @@ void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
 		die("KVM_GET_ONE_REG failed (lr)");
 	dprintf(debug_fd, " LR: 0x%lx\n", data);
 }
+
+u64 kvm_cpu__smccc_get_arg1(struct kvm_cpu *vcpu)
+{
+	struct kvm_one_reg reg;
+	u64 data;
+
+	reg.addr = (u64)&data;
+	reg.id = ARM64_CORE_REG(regs.regs[1]);
+	if (ioctl(vcpu->vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
+		die("KVM_GET_ONE_REG failed (x1)");
+
+	return data;
+}
+
+u64 kvm_cpu__smccc_get_arg2(struct kvm_cpu *vcpu)
+{
+	struct kvm_one_reg reg;
+	u64 data;
+
+	reg.addr = (u64)&data;
+	reg.id = ARM64_CORE_REG(regs.regs[2]);
+	if (ioctl(vcpu->vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
+		die("KVM_GET_ONE_REG failed (x2)");
+
+	return data;
+}
+
+void kvm_cpu__smccc_return(struct kvm_cpu *vcpu, u64 a0, u64 a1, u64 a2, u64 a3)
+{
+	struct kvm_one_reg reg;
+
+	reg.addr = (u64)&a0;
+	reg.id = ARM64_CORE_REG(regs.regs[0]);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (x0)");
+
+	reg.addr = (u64)&a1;
+	reg.id = ARM64_CORE_REG(regs.regs[1]);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (x1)");
+
+	reg.addr = (u64)&a2;
+	reg.id = ARM64_CORE_REG(regs.regs[2]);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (x2)");
+
+	reg.addr = (u64)&a3;
+	reg.id = ARM64_CORE_REG(regs.regs[3]);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (x3)");
+}
+
+void kvm_cpu__psci_set_entry(struct kvm_cpu *vcpu, u64 entry_addr, u64 context_id)
+{
+	struct kvm_one_reg reg;
+
+	reg.addr = (u64)&entry_addr;
+	reg.id = ARM64_CORE_REG(regs.pc);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (pc)");
+
+	reg.addr = (u64)&context_id;
+	reg.id = ARM64_CORE_REG(regs.regs[0]);
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
+		die("KVM_SET_ONE_REG failed (x0)");
+}
diff --git a/arm/include/arm-common/kvm-cpu-arch.h b/arm/include/arm-common/kvm-cpu-arch.h
--- a/arm/include/arm-common/kvm-cpu-arch.h
+++ b/arm/include/arm-common/kvm-cpu-arch.h
@@ -59,4 +59,9 @@ static inline bool kvm_cpu__emulate_mmio(struct kvm_cpu *vcpu, u64 phys_addr,
 unsigned long kvm_cpu__get_vcpu_mpidr(struct kvm_cpu *vcpu);
+u64 kvm_cpu__smccc_get_arg1(struct kvm_cpu *vcpu);
+u64 kvm_cpu__smccc_get_arg2(struct kvm_cpu *vcpu);
+void kvm_cpu__smccc_return(struct kvm_cpu *vcpu, u64 a0, u64 a1, u64 a2, u64 a3);
+void kvm_cpu__psci_set_entry(struct kvm_cpu *vcpu, u64 entry_addr, u64 context_id);
+
 #endif /* ARM_COMMON__KVM_CPU_ARCH_H */
diff --git a/arm/kvm-cpu.c b/arm/kvm-cpu.c
--- a/arm/kvm-cpu.c
+++ b/arm/kvm-cpu.c
@@ -163,3 +163,84 @@ bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
 void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
 {
 }
+
+static void kvm_cpu__arm_suspend(struct kvm_cpu *vcpu)
+{
+	struct kvm_mp_state mp_state = {
+		.mp_state = KVM_MP_STATE_SUSPENDED,
+	};
+
+	if (ioctl(vcpu->vcpu_fd, KVM_SET_MP_STATE, &mp_state) < 0)
+		die("KVM_SET_MP_STATE failed");
+}
+
+static void kvm_cpu__wakeup(struct kvm_cpu *vcpu)
+{
+	u64 entry_addr, context_id;
+
+	entry_addr = kvm_cpu__smccc_get_arg1(vcpu);
+	context_id = kvm_cpu__smccc_get_arg2(vcpu);
+
+	/*
+	 * The resuming CPU could have been a secondary CPU at boot. Ensure
+	 * the vCPU is made runnable.
+	 */
+	vcpu->init.features[0] &= ~(1ul << KVM_ARM_VCPU_POWER_OFF);
+
+	kvm_cpu__arch_reinit(vcpu);
+	kvm_cpu__reset_vcpu(vcpu);
+	kvm_cpu__psci_set_entry(vcpu, entry_addr, context_id);
+}
+
+static void kvm_cpu__psci_system_suspend(struct kvm_cpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	bool denied = false;
+	int i;
+
+	/*
+	 * Mark the caller as paused before actually pausing the VM. This
+	 * avoids the hazard of attempting to acquire the pause_lock in the
+	 * SIGKVMPAUSE handler from the thread that already holds it.
+	 */
+	vcpu->paused = 1;
+
+	kvm__pause(kvm);
+	for (i = 0; i < kvm->nrcpus; i++) {
+		struct kvm_cpu *tmp = kvm->cpus[i];
+		struct kvm_mp_state mp_state;
+
+		if (vcpu == tmp)
+			continue;
+
+		if (ioctl(tmp->vcpu_fd, KVM_GET_MP_STATE, &mp_state) < 0)
+			die("KVM_GET_MP_STATE failed");
+
+		if (mp_state.mp_state != KVM_MP_STATE_STOPPED) {
+			denied = true;
+			break;
+		}
+	}
+
+	if (!denied)
+		kvm_cpu__arm_suspend(vcpu);
+	else
+		kvm_cpu__smccc_return(vcpu, PSCI_RET_DENIED, 0, 0, 0);
+
+	vcpu->paused = 0;
+	kvm__continue(kvm);
+}
+
+bool kvm_cpu__arch_handle_system_event(struct kvm_cpu *vcpu)
+{
+	switch (vcpu->kvm_run->system_event.type) {
+	case KVM_SYSTEM_EVENT_SUSPEND:
+		kvm_cpu__psci_system_suspend(vcpu);
+		return true;
+	case KVM_SYSTEM_EVENT_WAKEUP:
+		kvm_cpu__wakeup(vcpu);
+		return true;
+	default:
+		return false;
+	}
+}
diff --git a/arm/kvm.c b/arm/kvm.c
--- a/arm/kvm.c
+++ b/arm/kvm.c
@@ -86,6 +86,15 @@ void kvm__arch_init(struct kvm *kvm, const char *hugetlbfs_path, u64 ram_size)
 	/* Create the virtual GIC. */
 	if (gic__create(kvm, kvm->cfg.arch.irqchip))
 		die("Failed to create virtual GIC");
+
+	if (kvm__supports_extension(kvm, KVM_CAP_ARM_SYSTEM_SUSPEND)) {
+		struct kvm_enable_cap cap = {
+			.cap = KVM_CAP_ARM_SYSTEM_SUSPEND,
+		};
+
+		if (ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+			die("Enabling KVM_CAP_ARM_SYSTEM_SUSPEND failed");
+	}
 }
 
 #define FDT_ALIGN SZ_2M
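
Not part of the patch: for reviewers unfamiliar with the guest side of this
exchange, here is a minimal sketch of the call that lands in the new
KVM_EXIT_SYSTEM_EVENT handling above. It assumes an AArch64 guest using the
HVC conduit; the function ID is PSCI SYSTEM_SUSPEND for SMC64 (0xC400000E)
from the PSCI 1.0 specification, and the helper name is made up for
illustration.

#include <stdint.h>

#define PSCI_1_0_FN64_SYSTEM_SUSPEND	0xC400000EUL

/* Hypothetical guest-side helper, for illustration only. */
static long psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
	register uint64_t x0 asm("x0") = PSCI_1_0_FN64_SYSTEM_SUSPEND;
	register uint64_t x1 asm("x1") = entry_addr;	/* read by kvm_cpu__smccc_get_arg1() */
	register uint64_t x2 asm("x2") = context_id;	/* read by kvm_cpu__smccc_get_arg2() */

	/* Trapped by KVM and forwarded to the VMM as a system event. */
	asm volatile("hvc #0"
		     : "+r" (x0)
		     : "r" (x1), "r" (x2)
		     : "memory");

	/*
	 * On success the call never returns here: the vCPU is resumed at
	 * entry_addr with context_id in x0, which is what
	 * kvm_cpu__psci_set_entry() arranges. Only a failure such as
	 * PSCI_RET_DENIED (-3) shows up as a direct return value, via
	 * kvm_cpu__smccc_return().
	 */
	return (long)x0;
}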
KVM_CAP_ARM_SYSTEM_SUSPEND allows VMMs to trap guest attempts to use the PSCI SYSTEM_SUSPEND hypercall. Make use of that capability in KVM tool to implement guest suspend support. Add some minimal SMCCC register handling (params, return values) for AArch32 and AArch64. Perform only the required sanity check before suspending the VM by ensuring all other vCPUs besides the caller are powered off. Leverage KVM_MP_STATE_SUSPENDED to emulate the suspend as an architectural WFI. Signed-off-by: Oliver Upton <oupton@google.com> --- arm/aarch32/kvm-cpu.c | 72 ++++++++++++++++++++++++ arm/aarch64/kvm-cpu.c | 66 ++++++++++++++++++++++ arm/include/arm-common/kvm-cpu-arch.h | 5 ++ arm/kvm-cpu.c | 81 +++++++++++++++++++++++++++ arm/kvm.c | 9 +++ 5 files changed, 233 insertions(+)