@@ -21,7 +21,86 @@
#include <xen/types.h>
#include <xen/cache.h>
#include <xen/init.h>
+#include <asm/hvm/vmx/vmx.h>
+#include <asm/intel_pt.h>
/* intel_pt: Flag to enable Intel Processor Trace (default on). */
bool_t __read_mostly opt_intel_pt = 1;
boolean_param("intel_pt", opt_intel_pt);
+
+/*
+ * Write the PT context saved in @ctx back into the hardware RTIT MSRs.
+ * Used on the VM-entry path to restore the guest's trace configuration
+ * (status, output region, CR3 filter and address-range filters).
+ *
+ * NOTE(review): each address range owns an _A and _B MSR laid out
+ * consecutively from ADDR0_A.  If @addr_num counts address *ranges*
+ * (CPUID.(0x14,1).EAX[2:0], see pt_vcpu_init()), this loop restores
+ * only addr_num MSRs, i.e. half the registers -- confirm whether the
+ * bound should be 2 * addr_num and ctx->addr sized accordingly.
+ */
+static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_num)
+{
+ u32 i;
+ wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK_PTRS, ctx->output_mask_ptrs);
+ wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+ for ( i = 0; i < addr_num; i++ )
+ wrmsrl(MSR_IA32_RTIT_ADDR0_A + i, ctx->addr[i]);
+}
+
+/*
+ * Read the hardware RTIT MSRs into @ctx.  Used on the VM-exit path to
+ * save the guest's trace configuration so pt_load_msr() can restore it
+ * on the next VM entry.
+ *
+ * NOTE(review): same concern as pt_load_msr() -- if @addr_num counts
+ * address *ranges*, only half of the ADDRn_A/ADDRn_B MSR pairs are
+ * saved here; confirm whether the bound should be 2 * addr_num.
+ */
+static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_num)
+{
+ u32 i;
+ rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK_PTRS, ctx->output_mask_ptrs);
+ rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+ for ( i = 0; i < addr_num; i++ )
+ rdmsrl(MSR_IA32_RTIT_ADDR0_A + i, ctx->addr[i]);
+}
+
<test></test>
+/*
+ * Restore the guest's Intel PT state ahead of VM entry: write the saved
+ * guest IA32_RTIT_CTL into the VMCS and reload the remaining RTIT MSRs
+ * from the per-vCPU context.  No-op when PT was not enabled for this
+ * vCPU by pt_vcpu_init().
+ */
+void pt_guest_enter(struct vcpu *v)
+{
+ struct pt_desc *pt = &v->arch.hvm_vmx.pt_desc;
+
+ if ( pt->intel_pt_enabled )
+ {
+ vmx_vmcs_enter(v);
+ __vmwrite(GUEST_IA32_RTIT_CTL, pt->guest_pt_ctx.ctl);
+ vmx_vmcs_exit(v);
+
+ /* MSRs are loaded directly; CTL goes via the VMCS field above. */
+ pt_load_msr(&pt->guest_pt_ctx, pt->addr_num);
+ }
+}
+
+/*
+ * Save the guest's Intel PT state on VM exit: read guest IA32_RTIT_CTL
+ * back out of the VMCS and snapshot the remaining RTIT MSRs into the
+ * per-vCPU context for the next pt_guest_enter().  No-op when PT was
+ * not enabled for this vCPU by pt_vcpu_init().
+ */
+void pt_guest_exit(struct vcpu *v)
+{
+ struct pt_desc *pt = &v->arch.hvm_vmx.pt_desc;
+
+ if ( pt->intel_pt_enabled )
+ {
+ vmx_vmcs_enter(v);
+ __vmread(GUEST_IA32_RTIT_CTL, &pt->guest_pt_ctx.ctl);
+ vmx_vmcs_exit(v);
+
+ pt_save_msr(&pt->guest_pt_ctx, pt->addr_num);
+ }
+}
+
+/*
+ * Per-vCPU Intel PT initialisation.
+ *
+ * Resets the vCPU's PT state, then enables PT only when the CPU
+ * supports it, the "intel_pt" command line option allows it, and the
+ * VMCS "PT use guest physical addresses" secondary control is set.
+ * Otherwise intel_pt_enabled stays false and the enter/exit hooks are
+ * no-ops for this vCPU.
+ */
+void pt_vcpu_init(struct vcpu *v)
+{
+ struct pt_desc *pt = &v->arch.hvm_vmx.pt_desc;
+ unsigned int eax, ebx, ecx, edx;
+
+ memset(pt, 0, sizeof(struct pt_desc));
+ pt->intel_pt_enabled = false;
+
+ if ( !cpu_has_intel_pt || !opt_intel_pt ||
+ !(v->arch.hvm_vmx.secondary_exec_control & SECONDARY_EXEC_PT_USE_GPA) )
+ return;
+
+ /*
+ * CPUID.0x14 subleaf 0 EAX reports the maximum supported subleaf;
+ * subleaf 1 (address range count) must exist.  Accept any maximum
+ * >= 1 rather than exactly 1 so CPUs reporting additional subleaves
+ * are not spuriously rejected.
+ */
+ if ( cpuid_eax(0x14) < 1 )
+ return;
+
+ /* Number of configurable address ranges: CPUID.(0x14,1).EAX[2:0]. */
+ cpuid_count(0x14, 1, &eax, &ebx, &ecx, &edx);
+ pt->addr_num = eax & 0x7;
+
+ /* Initial OUTPUT_MASK_PTRS; presumably a single minimal (4K) output
+ * region -- confirm against the SDM MaskOrTableOffset encoding. */
+ pt->guest_pt_ctx.output_mask_ptrs = 0x7F;
+ pt->intel_pt_enabled = true;
+
+ /* Guest tracing starts disabled (TraceEn = 0). */
+ vmx_vmcs_enter(v);
+ __vmwrite(GUEST_IA32_RTIT_CTL, 0);
+ vmx_vmcs_exit(v);
+}
@@ -468,6 +468,8 @@ static int vmx_vcpu_initialise(struct vcpu *v)
if ( v->vcpu_id == 0 )
v->arch.user_regs.rax = 1;
+ pt_vcpu_init(v);
+
return 0;
}
@@ -3492,6 +3494,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
__vmread(GUEST_RSP, &regs->rsp);
__vmread(GUEST_RFLAGS, &regs->rflags);
+ pt_guest_exit(v);
hvm_invalidate_regs_fields(regs);
if ( paging_mode_hap(v->domain) )
@@ -4260,6 +4263,7 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
}
}
+ pt_guest_enter(curr);
out:
if ( unlikely(curr->arch.hvm_vmx.lbr_fixup_enabled) )
lbr_fixup();
@@ -421,6 +421,8 @@ enum vmcs_field {
GUEST_PDPTE0 = 0x0000280a,
#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
GUEST_BNDCFGS = 0x00002812,
+ GUEST_IA32_RTIT_CTL = 0x00002814,
+ GUEST_IA32_RTIT_CTL_HIGH = 0x00002815,
HOST_PAT = 0x00002c00,
HOST_EFER = 0x00002c02,
HOST_PERF_GLOBAL_CTRL = 0x00002c04,
@@ -40,4 +40,8 @@ struct pt_desc {
extern bool_t opt_intel_pt;
+void pt_vcpu_init(struct vcpu *v);
+void pt_guest_enter(struct vcpu *v);
+void pt_guest_exit(struct vcpu *v);
+
#endif /* __ASM_X86_HVM_INTEL_PT_H_ */
This patch implements Intel Processor Trace context switch. Signed-off-by: Luwei Kang <luwei.kang@intel.com> --- xen/arch/x86/cpu/intel_pt.c | 79 ++++++++++++++++++++++++++++++++++++++ xen/arch/x86/hvm/vmx/vmx.c | 4 ++ xen/include/asm-x86/hvm/vmx/vmcs.h | 2 + xen/include/asm-x86/intel_pt.h | 4 ++ 4 files changed, 89 insertions(+)