@@ -375,6 +375,11 @@ struct kvm_arch {
 * the associated pKVM instance in the hypervisor.
 */
struct kvm_protected_vm pkvm;
+
+	/* Migration target CPU list registered via KVM_ARM_SET_MIGRN_TARGET_CPUS;
+	 * migrn_cpu is kfree()d on VM destruction. */
+	u32 num_migrn_cpus;
+	struct migrn_target_cpu *migrn_cpu;
};
struct kvm_vcpu_fault_info {
@@ -540,6 +540,24 @@ struct reg_mask_range {
	__u32 reserved[13];
};
+/*
+ * One migration target CPU type, identified by its ID register values
+ * (presumably MIDR_EL1/REVIDR_EL1/AIDR_EL1 — confirm against usage).
+ * reserved: should be set to zero for future compatibility.
+ */
+struct migrn_target_cpu {
+	__u32 midr;
+	__u32 revidr;
+	__u32 aidr;
+	__u32 reserved;
+};
+
+/* Argument layout for the KVM_ARM_SET_MIGRN_TARGET_CPUS VM ioctl. */
+struct kvm_arm_migrn_cpus {
+	__u32 ncpus;
+	struct migrn_target_cpu entries[] __counted_by(ncpus);
+};
+
#endif
#endif /* __ARM_KVM_H__ */
@@ -48,6 +48,9 @@
#include "sys_regs.h"
+/* Upper bound on migration target CPUs userspace may register per VM */
+#define MAX_MIGRN_TARGET_CPUS 4
+
static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
enum kvm_wfx_trap_policy {
@@ -267,6 +270,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_destroy_mpidr_data(kvm);
kfree(kvm->arch.sysreg_masks);
+ kfree(kvm->arch.migrn_cpu);
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
@@ -339,6 +343,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_SYSTEM_SUSPEND:
case KVM_CAP_IRQFD_RESAMPLE:
case KVM_CAP_COUNTER_OFFSET:
+ case KVM_CAP_ARM_MIGRN_TARGET_CPUS:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
@@ -1904,6 +1909,54 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return -EFAULT;
return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
}
+	case KVM_ARM_SET_MIGRN_TARGET_CPUS: {
+		struct kvm_arm_migrn_cpus __user *user_cpus = argp;
+		struct kvm_arm_migrn_cpus cpus;
+		struct migrn_target_cpu *entries;
+		int ret;
+		u32 i;
+
+		mutex_lock(&kvm->lock);
+		/* The target CPU list may only be set once in a VM's lifetime. */
+		if (kvm->arch.num_migrn_cpus) {
+			ret = -EINVAL;
+			goto migrn_target_cpus_unlock;
+		}
+		/* sizeof(cpus) covers only the fixed header, not entries[]. */
+		if (copy_from_user(&cpus, user_cpus, sizeof(cpus))) {
+			ret = -EFAULT;
+			goto migrn_target_cpus_unlock;
+		}
+		/* An empty target list is meaningless; reject it. */
+		if (!cpus.ncpus) {
+			ret = -EINVAL;
+			goto migrn_target_cpus_unlock;
+		}
+		if (cpus.ncpus > MAX_MIGRN_TARGET_CPUS) {
+			ret = -E2BIG;
+			goto migrn_target_cpus_unlock;
+		}
+		/* memdup_array_user() checks the size multiplication for overflow. */
+		entries = memdup_array_user(user_cpus->entries, cpus.ncpus,
+					    sizeof(*entries));
+		if (IS_ERR(entries)) {
+			ret = PTR_ERR(entries);
+			goto migrn_target_cpus_unlock;
+		}
+		/* Reject unknown bits so the reserved field stays usable later. */
+		for (i = 0; i < cpus.ncpus; i++) {
+			if (entries[i].reserved) {
+				kfree(entries);
+				ret = -EINVAL;
+				goto migrn_target_cpus_unlock;
+			}
+		}
+		kvm->arch.num_migrn_cpus = cpus.ncpus;
+		kvm->arch.migrn_cpu = entries;
+		ret = 0;
+migrn_target_cpus_unlock:
+		mutex_unlock(&kvm->lock);
+		return ret;
+	}
default:
return -EINVAL;
}
@@ -933,6 +933,7 @@ struct kvm_enable_cap {
#define KVM_CAP_PRE_FAULT_MEMORY 236
#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
#define KVM_CAP_X86_GUEST_MODE 238
+#define KVM_CAP_ARM_MIGRN_TARGET_CPUS 239
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1250,6 +1251,7 @@ struct kvm_vfio_spapr_tce {
/* Available with KVM_CAP_COUNTER_OFFSET */
#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
+#define KVM_ARM_SET_MIGRN_TARGET_CPUS _IOW(KVMIO, 0xb7, struct kvm_arm_migrn_cpus)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
Add a VM ioctl that allows userspace (the VMM) to set the list of migration target CPUs. This tells KVM which target CPU implementations this VM may encounter during its lifetime. In subsequent patches, KVM will use this information to enable the errata associated with all of the target CPUs for this VM. Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/include/uapi/asm/kvm.h | 12 ++++++++++ arch/arm64/kvm/arm.c | 38 +++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 ++ 4 files changed, 55 insertions(+)