@@ -3022,3 +3022,71 @@ int kvm_vm_ioctl_get_hv_vsm_state(struct kvm *kvm, struct kvm_hv_vsm_state *stat
state->vsm_code_page_offsets = hv->vsm_code_page_offsets.as_u64;
return 0;
}
+
+/*
+ * Per-device state for a "kvm-hv-vtl" device. Each instance represents one
+ * Virtual Trust Level of the partition; 'vtl' is assigned at creation time
+ * from the number of pre-existing VTL devices (see kvm_hv_vtl_create()).
+ */
+struct kvm_hv_vtl_dev {
+	int vtl;
+};
+
+/*
+ * Read a device attribute. Only KVM_DEV_HV_VTL_GROUP_VTLNUM is supported:
+ * copy the device's VTL number out to the user-supplied address.
+ *
+ * Returns 0 on success, -EFAULT if the copy-out faults, and -EINVAL for
+ * any unrecognized group/attribute pair.
+ */
+static int kvm_hv_vtl_get_attr(struct kvm_device *dev,
+			       struct kvm_device_attr *attr)
+{
+	struct kvm_hv_vtl_dev *vtl_dev = dev->private;
+
+	switch (attr->group) {
+	case KVM_DEV_HV_VTL_GROUP:
+		switch (attr->attr) {
+		case KVM_DEV_HV_VTL_GROUP_VTLNUM:
+			return put_user(vtl_dev->vtl, (u32 __user *)attr->addr);
+		}
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * .release callback: invoked once the device fd is dropped. Frees the
+ * per-device state and then the kvm_device itself — ownership of 'dev'
+ * (allocated by kvm_ioctl_create_device()) transfers to .release.
+ */
+static void kvm_hv_vtl_release(struct kvm_device *dev)
+{
+	struct kvm_hv_vtl_dev *vtl_dev = dev->private;
+
+	kfree(vtl_dev);
+	kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
+}
+
+/*
+ * Forward declaration: kvm_hv_vtl_create() walks the VM's device list
+ * looking for kvm_hv_vtl_ops, and the ops table needs the create callback,
+ * so the two reference each other.
+ */
+static int kvm_hv_vtl_create(struct kvm_device *dev, u32 type);
+
+/* Device ops for the partition-wide VTL state-tracking device. */
+static struct kvm_device_ops kvm_hv_vtl_ops = {
+	.name = "kvm-hv-vtl",
+	.create = kvm_hv_vtl_create,
+	.release = kvm_hv_vtl_release,
+	.get_attr = kvm_hv_vtl_get_attr,
+};
+
+/*
+ * .create callback: allocate per-device state and assign this device the
+ * next VTL number, computed by counting the kvm-hv-vtl devices already
+ * attached to this VM. The first device created is VTL0, the second VTL1,
+ * and so on.
+ *
+ * NOTE(review): devices are counted, never renumbered — if an intermediate
+ * VTL device could be destroyed and a new one created, VTL numbers would
+ * repeat. Presumably VTL devices live for the VM's lifetime; confirm.
+ * NOTE(review): no upper bound is enforced on 'vtl' here — TODO confirm
+ * whether creation should be capped at the architectural VTL limit.
+ */
+static int kvm_hv_vtl_create(struct kvm_device *dev, u32 type)
+{
+	struct kvm_hv_vtl_dev *vtl_dev;
+	struct kvm_device *tmp;
+	int vtl = 0;
+
+	vtl_dev = kzalloc(sizeof(*vtl_dev), GFP_KERNEL_ACCOUNT);
+	if (!vtl_dev)
+		return -ENOMEM;
+
+	/* Device creation is protected by kvm->lock */
+	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
+		if (tmp->ops == &kvm_hv_vtl_ops)
+			vtl++;
+
+	vtl_dev->vtl = vtl;
+	dev->private = vtl_dev;
+
+	return 0;
+}
+
+/*
+ * Register the kvm-hv-vtl device type so userspace can create instances
+ * via KVM_CREATE_DEVICE. Returns 0 on success or a negative errno from
+ * kvm_register_device_ops() (e.g. if the slot is already taken).
+ */
+int kvm_hv_vtl_dev_register(void)
+{
+	return kvm_register_device_ops(&kvm_hv_vtl_ops, KVM_DEV_TYPE_HV_VSM_VTL);
+}
+
+/* Unregister the kvm-hv-vtl device type (inverse of the register helper). */
+void kvm_hv_vtl_dev_unregister(void)
+{
+	kvm_unregister_device_ops(KVM_DEV_TYPE_HV_VSM_VTL);
+}
@@ -269,4 +269,7 @@ static inline void kvm_mmu_role_set_hv_bits(struct kvm_vcpu *vcpu,
role->vtl = kvm_hv_get_active_vtl(vcpu);
}
+/* (Un)registration of the kvm-hv-vtl device type (KVM_DEV_TYPE_HV_VSM_VTL). */
+int kvm_hv_vtl_dev_register(void);
+void kvm_hv_vtl_dev_unregister(void);
+
#endif
@@ -6521,6 +6521,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
mutex_unlock(&kvm->lock);
break;
case KVM_CAP_HYPERV_VSM:
+ kvm_hv_vtl_dev_register();
kvm->arch.hyperv.hv_enable_vsm = true;
r = 0;
break;
@@ -9675,6 +9676,8 @@ void kvm_x86_vendor_exit(void)
mutex_lock(&vendor_module_lock);
kvm_x86_ops.hardware_enable = NULL;
mutex_unlock(&vendor_module_lock);
+
+ kvm_hv_vtl_dev_unregister();
}
EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
@@ -1471,6 +1471,9 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP_DEL KVM_DEV_VFIO_FILE_DEL
#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
+/* kvm-hv-vtl device: attribute group/attr to query the device's VTL number. */
+#define KVM_DEV_HV_VTL_GROUP 1
+#define KVM_DEV_HV_VTL_GROUP_VTLNUM 1
+
enum kvm_device_type {
KVM_DEV_TYPE_FSL_MPIC_20 = 1,
#define KVM_DEV_TYPE_FSL_MPIC_20 KVM_DEV_TYPE_FSL_MPIC_20
@@ -1494,6 +1497,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
KVM_DEV_TYPE_RISCV_AIA,
#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
+ KVM_DEV_TYPE_HV_VSM_VTL,
+#define KVM_DEV_TYPE_HV_VSM_VTL KVM_DEV_TYPE_HV_VSM_VTL
KVM_DEV_TYPE_MAX,
};
Introduce a new KVM device aimed at tracking partition-wide VTL state; it'll be the one responsible for keeping track of each VTL's memory protections. For now its functionality is limited: it only exposes its VTL level through a device attribute. Additionally, the device type is only registered if the VSM cap is enabled. Signed-off-by: Nicolas Saenz Julienne <nsaenz@amazon.com> --- arch/x86/kvm/hyperv.c | 68 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/hyperv.h | 3 ++ arch/x86/kvm/x86.c | 3 ++ include/uapi/linux/kvm.h | 5 +++ 4 files changed, 79 insertions(+)