@@ -523,6 +523,8 @@ struct kvm_vcpu_hv {
/* Xen HVM per vcpu emulation context */
struct kvm_vcpu_xen {
u64 hypercall_rip;
+ struct kvm_host_map vcpu_info_map;
+ struct vcpu_info *vcpu_info;
};
struct kvm_vcpu_arch {
@@ -10004,6 +10004,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
kvm_hv_vcpu_uninit(vcpu);
+ kvm_xen_vcpu_uninit(vcpu);
kvm_pmu_destroy(vcpu);
kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
@@ -80,6 +80,27 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
return 0;
}
+static void *xen_vcpu_info(struct kvm_vcpu *v)
+{
+ struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(v);
+ struct kvm_xen *kvm = &v->kvm->arch.xen;
+ void *hva;
+
+ hva = READ_ONCE(vcpu_xen->vcpu_info);
+ if (hva)
+ return hva;
+
+ if (v->vcpu_id < MAX_VIRT_CPUS)
+ hva = READ_ONCE(kvm->shinfo);
+
+ if (hva) {
+ hva += offsetof(struct shared_info, vcpu_info);
+ hva += v->vcpu_id * sizeof(struct vcpu_info);
+ }
+
+ return hva;
+}
+
void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
{
struct kvm_vcpu_arch *vcpu = &v->arch;
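The new xen_vcpu_info() helper prefers an explicitly registered per-vCPU area and only falls back to the legacy slot embedded in shared_info, which exists solely for the first MAX_VIRT_CPUS vCPUs. For context, this is roughly how a Linux guest ends up registering a separate area (a hedged guest-side sketch loosely modelled on xen_vcpu_setup(); not part of this patch):

	struct vcpu_info *vi = &per_cpu(xen_vcpu_info, cpu);
	struct vcpu_register_vcpu_info info = {
		.mfn	= virt_to_gfn(vi),	/* frame holding the area */
		.offset	= offset_in_page(vi),	/* offset within that frame */
	};
	int err;

	/* Ask the hypervisor to deliver events/time via 'vi' from now on. */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (err)
		pr_warn("vcpu_info registration failed: %d\n", err);

When that hypercall reaches the VMM, userspace can mirror it into KVM via the KVM_XEN_ATTR_TYPE_VCPU_INFO attribute added below.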
@@ -88,9 +109,6 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
void *hva;
int idx;
- if (v->vcpu_id >= MAX_VIRT_CPUS)
- return;
-
BUILD_BUG_ON(offsetof(struct shared_info, vcpu_info) != 0);
BUILD_BUG_ON(offsetof(struct compat_shared_info, vcpu_info) != 0);
BUILD_BUG_ON(sizeof(struct vcpu_info) != sizeof(struct compat_vcpu_info));
@@ -98,7 +116,7 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
offsetof(struct compat_vcpu_info, time));
idx = srcu_read_lock(&v->kvm->srcu);
- hva = READ_ONCE(v->kvm->arch.xen.shinfo);
+ hva = xen_vcpu_info(v);
if (!hva)
goto out;
@@ -108,7 +126,7 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
guest_hv_clock = hva + offset;
if (guest_hv_clock->version & 1)
- ++guest_hv_clock->version; /* first time write, random junk */
+ ++guest_hv_clock->version;
vcpu->hv_clock.version = guest_hv_clock->version + 1;
guest_hv_clock->version = vcpu->hv_clock.version;
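The version field implements the usual pvclock seqcount: the producer makes it odd before touching the payload and even again afterwards, so a reader retries until it observes the same even value on both sides of its reads. A minimal sketch of the reader side (the real guest helpers are pvclock_read_begin()/pvclock_read_retry()):

	static u64 read_system_time(struct pvclock_vcpu_time_info *ti)
	{
		u32 version;
		u64 system_time;

		do {
			version = READ_ONCE(ti->version);
			rmb();	/* read version before the payload */
			system_time = ti->system_time;
			rmb();	/* read the payload before re-checking */
		} while ((version & 1) || version != READ_ONCE(ti->version));

		return system_time;
	}

This is why an odd version found in a freshly mapped page is first made even: the subsequent +1 then correctly marks an update in progress.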
@@ -137,6 +155,20 @@ void kvm_xen_setup_pvclock_page(struct kvm_vcpu *v)
srcu_read_unlock(&v->kvm->srcu, idx);
}
+static int vcpu_attr_loc(struct kvm_vcpu *vcpu, u16 type,
+ struct kvm_host_map **map, void ***hva, size_t *sz)
+{
+ switch (type) {
+ case KVM_XEN_ATTR_TYPE_VCPU_INFO:
+ *map = &vcpu->arch.xen.vcpu_info_map;
+ *hva = (void **)&vcpu->arch.xen.vcpu_info;
+ if (sz)
+ *sz = sizeof(struct vcpu_info);
+ return 0;
+ }
+ return -EINVAL;
+}
+
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
int r = -ENOENT;
@@ -157,6 +189,28 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;
}
+ case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+ gpa_t gpa = data->u.vcpu_attr.gpa;
+ struct kvm_host_map *map;
+ struct kvm_vcpu *v;
+ size_t sz;
+ void **hva;
+
+ v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+ if (!v)
+ return -EINVAL;
+
+ r = vcpu_attr_loc(v, data->type, &map, &hva, &sz);
+ if (r)
+ return r;
+
+ r = kvm_xen_map_guest_page(kvm, map, hva, gpa, sz);
+ if (!r)
+ kvm_xen_setup_pvclock_page(v);
+
+ break;
+ }
+
default:
break;
}
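From userspace, registering a vcpu_info then looks like this (a sketch; vm_fd, vcpu_id and vcpu_info_gpa are placeholders, and KVM_XEN_HVM_SET_ATTR is the ioctl wired up earlier in the series):

	struct kvm_xen_hvm_attr ha = {
		.type = KVM_XEN_ATTR_TYPE_VCPU_INFO,
		.u.vcpu_attr.vcpu = vcpu_id,
		.u.vcpu_attr.gpa = vcpu_info_gpa,	/* guest address of the area */
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha) < 0)
		err(1, "KVM_XEN_HVM_SET_ATTR");

Note that a successful set immediately refreshes the pvclock page for that vCPU via kvm_xen_setup_pvclock_page().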
@@ -182,6 +236,27 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;
}
+ case KVM_XEN_ATTR_TYPE_VCPU_INFO: {
+ struct kvm_host_map *map;
+ struct kvm_vcpu *v;
+ void **hva;
+
+ v = kvm_get_vcpu(kvm, data->u.vcpu_attr.vcpu);
+ if (!v)
+ return -EINVAL;
+
+ r = vcpu_attr_loc(v, data->type, &map, &hva, NULL);
+ if (r)
+ return r;
+
+ if (!*hva)
+ return -EINVAL;
+ data->u.vcpu_attr.gpa = gfn_to_gpa(map->gfn) +
+ offset_in_page(*hva);
+ r = 0;
+ break;
+ }
+
default:
break;
}
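The matching read-back (same placeholder names as above):

	struct kvm_xen_hvm_attr ha = {
		.type = KVM_XEN_ATTR_TYPE_VCPU_INFO,
		.u.vcpu_attr.vcpu = vcpu_id,
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_GET_ATTR, &ha) == 0)
		printf("vcpu_info at gpa %#llx\n",
		       (unsigned long long)ha.u.vcpu_attr.gpa);

If no vcpu_info has been registered for that vCPU yet, the GET fails with -EINVAL instead of returning a stale gpa.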
@@ -321,6 +396,17 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
return 0;
}
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_xen *vcpu_xen = vcpu_to_xen_vcpu(vcpu);
+
+ if (vcpu_xen->vcpu_info) {
+ kvm_unmap_gfn(vcpu->kvm, &vcpu_xen->vcpu_info_map,
+ NULL, true, false);
+ vcpu_xen->vcpu_info = NULL;
+ }
+}
+
void kvm_xen_destroy_vm(struct kvm *kvm)
{
struct kvm_xen *xen = &kvm->arch.xen;
@@ -9,12 +9,26 @@
#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__
+static inline struct kvm_vcpu_xen *vcpu_to_xen_vcpu(struct kvm_vcpu *vcpu)
+{
+ return &vcpu->arch.xen;
+}
+
+static inline struct kvm_vcpu *xen_vcpu_to_vcpu(struct kvm_vcpu_xen *xen_vcpu)
+{
+ struct kvm_vcpu_arch *arch;
+
+ arch = container_of(xen_vcpu, struct kvm_vcpu_arch, xen);
+ return container_of(arch, struct kvm_vcpu, arch);
+}
+
void kvm_xen_setup_pvclock_page(struct kvm_vcpu *vcpu);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
int kvm_xen_hvm_config(struct kvm_vcpu *vcpu, u64 data);
void kvm_xen_destroy_vm(struct kvm *kvm);
+void kvm_xen_vcpu_uninit(struct kvm_vcpu *vcpu);
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
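The two helpers above are exact inverses: kvm_vcpu_xen is embedded in kvm_vcpu_arch, which is itself embedded in kvm_vcpu, hence the two container_of() steps on the way back out. An illustrative sketch of the round trip (not part of this patch):

	static void assert_xen_vcpu_roundtrip(struct kvm_vcpu *v)
	{
		struct kvm_vcpu_xen *xv = vcpu_to_xen_vcpu(v);

		/* Walk back from the embedded member to its container. */
		WARN_ON(xen_vcpu_to_vcpu(xv) != v);
	}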
@@ -1588,12 +1588,17 @@ struct kvm_xen_hvm_attr {
struct {
__u64 gfn;
} shared_info;
+ struct {
+ __u32 vcpu;
+ __u64 gpa;
+ } vcpu_attr;
__u64 pad[4];
} u;
};
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
+#define KVM_XEN_ATTR_TYPE_VCPU_INFO 0x2
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {