--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1774,6 +1774,8 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
/* Architecture-specific vmcs/vmcb bits */
hvm_funcs.save_cpu_ctxt(v, &ctxt);

+ ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
+

ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);

hvm_get_segment_register(v, x86_seg_idtr, &seg);
@@ -2070,6 +2072,8 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;

+ hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm_domain.sync_tsc);
+
seg.limit = ctxt.idtr_limit;
seg.base = ctxt.idtr_base;
hvm_set_segment_register(v, x86_seg_idtr, &seg);
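The two hvm.c hunks above are the core of the change: the guest TSC is now saved and restored once, in the common hvm_save_cpu_ctxt()/hvm_load_cpu_ctxt() paths, against the domain-wide d->arch.hvm_domain.sync_tsc reference, instead of separately in each vendor's save/load hook. Below is a minimal standalone sketch of why sampling every vCPU against one fixed host TSC matters; vcpu_model, rdtsc_model() and get_guest_tsc_fixed() are hypothetical stand-ins for illustration, not Xen's implementation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Fake host TSC standing in for rdtsc(); everything here is a model,
 * not Xen code. */
static uint64_t host_tsc;
static uint64_t rdtsc_model(void) { return host_tsc; }

/* Hypothetical per-vCPU state: guest TSC = host TSC + offset. */
struct vcpu_model { uint64_t tsc_offset; };

/* Read the guest TSC at a caller-chosen host instant; at_tsc == 0 means
 * "now". This mirrors the shape of hvm_get_guest_tsc_fixed() in the
 * simple (non-emulated-TSC) case. */
static uint64_t get_guest_tsc_fixed(const struct vcpu_model *v, uint64_t at_tsc)
{
    return (at_tsc ? at_tsc : rdtsc_model()) + v->tsc_offset;
}

int main(void)
{
    /* Two vCPUs whose guest TSCs are perfectly synchronized. */
    struct vcpu_model vcpu[2] = { { .tsc_offset = 100 }, { .tsc_offset = 100 } };
    uint64_t a, b, sync_tsc;

    /* Old, vendor-hook style save: each vCPU is sampled at a different
     * host instant, so the sampling skew is baked into the saved image. */
    host_tsc = 1000; a = get_guest_tsc_fixed(&vcpu[0], 0);
    host_tsc = 1500; b = get_guest_tsc_fixed(&vcpu[1], 0);
    printf("per-vCPU sampling: %" PRIu64 " vs %" PRIu64 "\n", a, b);

    /* New, common-code style save: every vCPU is sampled against the one
     * domain-wide sync_tsc, so synchronized TSCs stay identical. */
    sync_tsc = 2000;
    a = get_guest_tsc_fixed(&vcpu[0], sync_tsc);
    b = get_guest_tsc_fixed(&vcpu[1], sync_tsc);
    printf("sync_tsc sampling: %" PRIu64 " vs %" PRIu64 "\n", a, b);

    return 0;
}

The first printf reports 1100 vs 1600 (skew introduced purely by save ordering); the second reports 2100 vs 2100.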
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -357,9 +357,6 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
data->msr_syscall_mask = vmcb->sfmask;
data->msr_efer = v->arch.hvm_vcpu.guest_efer;
data->msr_flags = -1ULL;
-
- data->tsc = hvm_get_guest_tsc_fixed(v,
- v->domain->arch.hvm_domain.sync_tsc);
}

static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
@@ -374,8 +371,6 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm_vcpu.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
-
- hvm_set_guest_tsc_fixed(v, data->tsc, v->domain->arch.hvm_domain.sync_tsc);
}

static void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -587,9 +587,6 @@ static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
data->msr_lstar = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
data->msr_star = guest_state->msrs[VMX_INDEX_MSR_STAR];
data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
-
- data->tsc = hvm_get_guest_tsc_fixed(v,
- v->domain->arch.hvm_domain.sync_tsc);
}

static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
@@ -604,8 +601,6 @@ static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)

v->arch.hvm_vmx.cstar = data->msr_cstar;
v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
-
- hvm_set_guest_tsc_fixed(v, data->tsc, v->domain->arch.hvm_domain.sync_tsc);
}
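With the SVM and VMX hooks above reduced to pure MSR state, the restore side only needs the matching offset computation in common code. Continuing the hypothetical model from earlier (again a sketch, not Xen's hvm_set_guest_tsc_fixed()), the load-side counterpart would be:

/* Load-side counterpart for the model above: pick the offset so that the
 * guest TSC reads back as guest_tsc at host instant at_tsc (0 == "now").
 * Restoring every vCPU against the same sync_tsc value means cross-vCPU
 * skew is neither added nor lost across save/restore. */
static void set_guest_tsc_fixed(struct vcpu_model *v, uint64_t guest_tsc,
                                uint64_t at_tsc)
{
    v->tsc_offset = guest_tsc - (at_tsc ? at_tsc : rdtsc_model());
}

Applied with the same sync_tsc used at save time, get_guest_tsc_fixed(v, sync_tsc) then round-trips exactly, which is the property the common-code hunks in hvm.c rely on.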