@@ -1033,6 +1033,7 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
static int pt_init(struct vcpu_vmx *vmx)
{
+	unsigned int eax, ebx, ecx, edx;
u32 pt_state_sz = sizeof(struct pt_state) + sizeof(u64) *
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2;
@@ -1044,13 +1045,39 @@ static int pt_init(struct vcpu_vmx *vmx)
vmx->pt_desc->host_ctx = (struct pt_state *)(vmx->pt_desc + 1);
vmx->pt_desc->guest_ctx = (void *)vmx->pt_desc->host_ctx + pt_state_sz;
+	/*
+	 * CPUID.(EAX=0DH,ECX=1):ECX enumerates the supervisor state
+	 * components manageable by XSAVES.  Allocate XSAVE areas for the
+	 * host and guest PT configuration only when PT state is among them.
+	 */
+	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+	if (ecx & XFEATURE_MASK_PT) {
+		vmx->pt_desc->host_xs = kmem_cache_zalloc(x86_fpu_cache,
+							  GFP_KERNEL_ACCOUNT);
+		vmx->pt_desc->guest_xs = kmem_cache_zalloc(x86_fpu_cache,
+							   GFP_KERNEL_ACCOUNT);
+		if (!vmx->pt_desc->host_xs || !vmx->pt_desc->guest_xs) {
+			/*
+			 * Allocation failure is not fatal: fall back to
+			 * MSR-based context switching (pt_xsave stays
+			 * false).  Clear the pointers so that no stale
+			 * reference to a freed object is left behind.
+			 */
+			if (vmx->pt_desc->host_xs)
+				kmem_cache_free(x86_fpu_cache,
+						vmx->pt_desc->host_xs);
+			if (vmx->pt_desc->guest_xs)
+				kmem_cache_free(x86_fpu_cache,
+						vmx->pt_desc->guest_xs);
+			vmx->pt_desc->host_xs = NULL;
+			vmx->pt_desc->guest_xs = NULL;
+		} else {
+			vmx->pt_desc->pt_xsave = true;
+		}
+	}
+
return 0;
}
static void pt_uninit(struct vcpu_vmx *vmx)
{
-	if (pt_mode == PT_MODE_HOST_GUEST)
-		kfree(vmx->pt_desc);
+	if (pt_mode == PT_MODE_HOST_GUEST) {
+		/*
+		 * Free the XSAVE areas *before* pt_desc: pt_xsave,
+		 * host_xs and guest_xs live inside pt_desc, so reading
+		 * them after kfree(vmx->pt_desc) is a use-after-free.
+		 */
+		if (vmx->pt_desc->pt_xsave) {
+			kmem_cache_free(x86_fpu_cache, vmx->pt_desc->host_xs);
+			kmem_cache_free(x86_fpu_cache, vmx->pt_desc->guest_xs);
+		}
+		kfree(vmx->pt_desc);
+	}
}
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
@@ -69,8 +69,11 @@ struct pt_desc {
u64 ctl_bitmask;
u32 addr_range;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+ bool pt_xsave;	/* true when both XSAVE areas below were allocated */
struct pt_state *host_ctx;
struct pt_state *guest_ctx;
+ struct fpu *host_xs;	/* XSAVE area for host PT configuration state */
+ struct fpu *guest_xs;	/* XSAVE area for guest PT configuration state */
};
/*
Allocate XSAVE area for host and guest Intel PT configuration when Intel PT working in HOST_GUEST mode. Intel PT configuration state can be saved using XSAVES and restored by XRSTORS instruction. Signed-off-by: Luwei Kang <luwei.kang@intel.com> --- arch/x86/kvm/vmx/vmx.c | 25 ++++++++++++++++++++++++- arch/x86/kvm/vmx/vmx.h | 3 +++ 2 files changed, 27 insertions(+), 1 deletion(-)