@@ -524,7 +524,9 @@ struct kvm_vcpu_hv {
struct kvm_vcpu_xen {
u64 hypercall_rip;
bool vcpu_info_set;
+ bool vcpu_time_info_set; /* true once userspace has registered a time-info GPA via KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO */
struct gfn_to_hva_cache vcpu_info_cache;
+ struct gfn_to_hva_cache vcpu_time_info_cache; /* gfn->hva cache over the guest's pvclock_vcpu_time_info region */
};
struct kvm_vcpu_arch {
@@ -2735,6 +2735,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
if (vcpu->xen.vcpu_info_set)
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
offsetof(struct compat_vcpu_info, time));
+ if (vcpu->xen.vcpu_time_info_set) /* also refresh the standalone time-info region, if one was registered */
+ kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); /* offset 0: cache maps the pvclock struct directly, unlike the embedded vcpu_info case above */
if (v == kvm_get_vcpu(v->kvm, 0))
kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
@@ -95,6 +95,21 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
break;
+ case KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO: /* register the GPA of a vCPU's pvclock_vcpu_time_info */
+ v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
+ if (!v)
+ return -EINVAL; /* no vCPU with that id */
+
+ r = kvm_gfn_to_hva_cache_init(kvm, &v->arch.xen.vcpu_time_info_cache,
+ data->u.vcpu_attr.gpa,
+ sizeof(struct pvclock_vcpu_time_info));
+ if (r)
+ return r; /* bad GPA; vcpu_time_info_set stays false */
+
+ v->arch.xen.vcpu_time_info_set = true;
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); /* populate the new region on next guest entry, as the VCPU_INFO case above does */
+ break; /* r is 0 here: the error path returned early */
+
default:
break;
}
@@ -131,6 +146,17 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
}
break;
+ case KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO: /* report the GPA previously registered for this vCPU, if any */
+ v = kvm_get_vcpu_by_id(kvm, data->u.vcpu_attr.vcpu_id);
+ if (!v)
+ return -EINVAL;
+
+ if (v->arch.xen.vcpu_time_info_set) {
+ data->u.vcpu_attr.gpa = v->arch.xen.vcpu_time_info_cache.gpa;
+ r = 0;
+ }
+ break; /* NOTE(review): when nothing is registered, r keeps its pre-switch value — presumably an error code; confirm against the function prologue */
+
default:
break;
}
@@ -1607,6 +1607,7 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_VCPU_INFO 0x2
+#define KVM_XEN_ATTR_TYPE_VCPU_TIME_INFO 0x3 /* per-vCPU pvclock_vcpu_time_info GPA (uses u.vcpu_attr) */
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {