[v4,04/10] x86/vmx: implement processor tracing for VMX

Message ID 70df90dad7e759f4bb3dba405dc45e372a57fab7.1593519420.git.michal.leszczynski@cert.pl
State Superseded
Series
  • Implement support for external IPT monitoring

Commit Message

Michał Leszczyński June 30, 2020, 12:33 p.m. UTC
From: Michal Leszczynski <michal.leszczynski@cert.pl>

Use the Intel Processor Trace feature to provision the
vmtrace_pt_* features.

Signed-off-by: Michal Leszczynski <michal.leszczynski@cert.pl>
---
 xen/arch/x86/hvm/vmx/vmx.c         | 89 ++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h      | 38 +++++++++++++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  3 +
 xen/include/asm-x86/hvm/vmx/vmx.h  | 14 +++++
 4 files changed, 144 insertions(+)

Comments

Roger Pau Monné July 1, 2020, 10:30 a.m. UTC | #1
On Tue, Jun 30, 2020 at 02:33:47PM +0200, Michał Leszczyński wrote:
> From: Michal Leszczynski <michal.leszczynski@cert.pl>
> 
> Use the Intel Processor Trace feature to provision the
> vmtrace_pt_* features.
> 
> Signed-off-by: Michal Leszczynski <michal.leszczynski@cert.pl>
> ---
>  xen/arch/x86/hvm/vmx/vmx.c         | 89 ++++++++++++++++++++++++++++++
>  xen/include/asm-x86/hvm/hvm.h      | 38 +++++++++++++
>  xen/include/asm-x86/hvm/vmx/vmcs.h |  3 +
>  xen/include/asm-x86/hvm/vmx/vmx.h  | 14 +++++
>  4 files changed, 144 insertions(+)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index ab19d9424e..db3f051b40 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -508,11 +508,24 @@ static void vmx_restore_host_msrs(void)
>  
>  static void vmx_save_guest_msrs(struct vcpu *v)
>  {
> +    uint64_t rtit_ctl;
> +
>      /*
>       * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
>       * be updated at any time via SWAPGS, which we cannot trap.
>       */
>      v->arch.hvm.vmx.shadow_gs = rdgsshadow();
> +
> +    if ( unlikely(v->arch.hvm.vmx.pt_state &&
> +                  v->arch.hvm.vmx.pt_state->active) )
> +    {

Nit: define rtit_ctl here to reduce the scope.
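E.g. (untested sketch):

    if ( unlikely(v->arch.hvm.vmx.pt_state &&
                  v->arch.hvm.vmx.pt_state->active) )
    {
        uint64_t rtit_ctl;

        rdmsrl(MSR_RTIT_CTL, rtit_ctl);
        BUG_ON(rtit_ctl & RTIT_CTL_TRACEEN);
        ...
    }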

> +        rdmsrl(MSR_RTIT_CTL, rtit_ctl);
> +        BUG_ON(rtit_ctl & RTIT_CTL_TRACEEN);
> +
> +        rdmsrl(MSR_RTIT_STATUS, v->arch.hvm.vmx.pt_state->status);
> +        rdmsrl(MSR_RTIT_OUTPUT_MASK,
> +               v->arch.hvm.vmx.pt_state->output_mask.raw);
> +    }
>  }
>  
>  static void vmx_restore_guest_msrs(struct vcpu *v)
> @@ -524,6 +537,17 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
>  
>      if ( cpu_has_msr_tsc_aux )
>          wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
> +
> +    if ( unlikely(v->arch.hvm.vmx.pt_state &&
> +                  v->arch.hvm.vmx.pt_state->active) )
> +    {
> +        wrmsrl(MSR_RTIT_OUTPUT_BASE,
> +               v->arch.hvm.vmx.pt_state->output_base);
> +        wrmsrl(MSR_RTIT_OUTPUT_MASK,
> +               v->arch.hvm.vmx.pt_state->output_mask.raw);
> +        wrmsrl(MSR_RTIT_STATUS,
> +               v->arch.hvm.vmx.pt_state->status);
> +    }
>  }
>  
>  void vmx_update_cpu_exec_control(struct vcpu *v)
> @@ -2240,6 +2264,60 @@ static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
>      return true;
>  }
>  
> +static int vmx_init_pt(struct vcpu *v)
> +{
> +    v->arch.hvm.vmx.pt_state = xzalloc(struct pt_state);
> +
> +    if ( !v->arch.hvm.vmx.pt_state )
> +        return -EFAULT;

-ENOMEM (failure to allocate is an out-of-memory condition, not a fault).

> +
> +    if ( !v->arch.vmtrace.pt_buf )

Again, I'm quite sure this doesn't build, since pt_buf is only
introduced in patch 5.

I will try to continue reviewing, but it's quite hard when the code
uses fields that haven't been introduced yet, as I have no idea what
they are.

> +        return -EINVAL;
> +
> +    if ( !v->domain->vmtrace_pt_size )
> +	return -EINVAL;

Indentation is off (hard tab), and this check could be joined with the
previous one, since both return -EINVAL.
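Something like (untested):

    if ( !v->arch.vmtrace.pt_buf || !v->domain->vmtrace_pt_size )
        return -EINVAL;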

> +
> +    v->arch.hvm.vmx.pt_state->output_base = page_to_maddr(v->arch.vmtrace.pt_buf);
> +    v->arch.hvm.vmx.pt_state->output_mask.raw = v->domain->vmtrace_pt_size - 1;
> +
> +    if ( vmx_add_host_load_msr(v, MSR_RTIT_CTL, 0) )
> +        return -EFAULT;
> +
> +    if ( vmx_add_guest_msr(v, MSR_RTIT_CTL,
> +                              RTIT_CTL_TRACEEN | RTIT_CTL_OS |
> +                              RTIT_CTL_USR | RTIT_CTL_BRANCH_EN) )
> +        return -EFAULT;

I think I've already pointed this out in v2, but please don't drop the
error codes returned by vmx_add_host_load_msr and vmx_add_guest_msr:
store them in a local variable and return them if != 0.
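I.e. along these lines (untested sketch):

    int rc;

    rc = vmx_add_host_load_msr(v, MSR_RTIT_CTL, 0);
    if ( rc )
        return rc;

    rc = vmx_add_guest_msr(v, MSR_RTIT_CTL,
                           RTIT_CTL_TRACEEN | RTIT_CTL_OS |
                           RTIT_CTL_USR | RTIT_CTL_BRANCH_EN);
    if ( rc )
        return rc;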

> +
> +    return 0;
> +}
> +
> +static int vmx_destroy_pt(struct vcpu* v)
> +{
> +    if ( v->arch.hvm.vmx.pt_state )
> +        xfree(v->arch.hvm.vmx.pt_state);
> +
> +    v->arch.hvm.vmx.pt_state = NULL;
> +    return 0;
> +}

I think those should be part of vmx_vcpu_{initialise/destroy}; there's
no need to introduce new hooks for this, as the allocation size will
already be known at domain creation.
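E.g. (untested sketch; assuming vmx_vcpu_initialise() is the right
place to hook this in):

    static int vmx_vcpu_initialise(struct vcpu *v)
    {
        ...
        if ( v->domain->vmtrace_pt_size )
        {
            int rc = vmx_init_pt(v);

            if ( rc )
                return rc;
        }
        ...
    }

with the matching xfree() of pt_state moved into vmx_vcpu_destroy().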

> +static int vmx_control_pt(struct vcpu *v, bool_t enable)

Plain bool.

> +{
> +    if ( !v->arch.hvm.vmx.pt_state )
> +        return -EINVAL;
> +
> +    v->arch.hvm.vmx.pt_state->active = enable;
> +    return 0;
> +}
> +
> +static int vmx_get_pt_offset(struct vcpu *v, uint64_t *offset)
> +{
> +    if ( !v->arch.hvm.vmx.pt_state )
> +        return -EINVAL;
> +
> +    *offset = v->arch.hvm.vmx.pt_state->output_mask.offset;
> +    return 0;
> +}
> +
>  static struct hvm_function_table __initdata vmx_function_table = {
>      .name                 = "VMX",
>      .cpu_up_prepare       = vmx_cpu_up_prepare,
> @@ -2295,6 +2373,10 @@ static struct hvm_function_table __initdata vmx_function_table = {
>      .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
>      .altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
>      .altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
> +    .vmtrace_init_pt = vmx_init_pt,
> +    .vmtrace_destroy_pt = vmx_destroy_pt,
> +    .vmtrace_control_pt = vmx_control_pt,
> +    .vmtrace_get_pt_offset = vmx_get_pt_offset,

As pointed out above, vmtrace_init_pt and vmtrace_destroy_pt should
IMO be dropped and instead done in vmx_vcpu_{initialise/destroy}.

>      .tsc_scaling = {
>          .max_ratio = VMX_TSC_MULTIPLIER_MAX,
>      },
> @@ -3674,6 +3756,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>  
>      hvm_invalidate_regs_fields(regs);
>  
> +    if ( unlikely(v->arch.hvm.vmx.pt_state &&
> +                  v->arch.hvm.vmx.pt_state->active) )
> +    {
> +        rdmsrl(MSR_RTIT_OUTPUT_MASK,
> +               v->arch.hvm.vmx.pt_state->output_mask.raw);
> +    }
> +
>      if ( paging_mode_hap(v->domain) )
>      {
>          /*
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 1eb377dd82..8f194889e5 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -214,6 +214,12 @@ struct hvm_function_table {
>      bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
>      int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);
>  
> +    /* vmtrace */
> +    int (*vmtrace_init_pt)(struct vcpu *v);
> +    int (*vmtrace_destroy_pt)(struct vcpu *v);
> +    int (*vmtrace_control_pt)(struct vcpu *v, bool_t enable);
> +    int (*vmtrace_get_pt_offset)(struct vcpu *v, uint64_t *offset);
> +
>      /*
>       * Parameters and callbacks for hardware-assisted TSC scaling,
>       * which are valid only when the hardware feature is available.
> @@ -655,6 +661,38 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
>      return false;
>  }
>  
> +static inline int vmtrace_init_pt(struct vcpu *v)
> +{
> +    if ( hvm_funcs.vmtrace_init_pt )
> +        return hvm_funcs.vmtrace_init_pt(v);
> +
> +    return -EOPNOTSUPP;
> +}
> +
> +static inline int vmtrace_destroy_pt(struct vcpu *v)
> +{
> +    if ( hvm_funcs.vmtrace_destroy_pt )
> +        return hvm_funcs.vmtrace_destroy_pt(v);
> +
> +    return -EOPNOTSUPP;
> +}
> +
> +static inline int vmtrace_control_pt(struct vcpu *v, bool_t enable)
> +{
> +    if ( hvm_funcs.vmtrace_control_pt )
> +        return hvm_funcs.vmtrace_control_pt(v, enable);
> +
> +    return -EOPNOTSUPP;
> +}
> +
> +static inline int vmtrace_get_pt_offset(struct vcpu *v, uint64_t *offset)
> +{
> +    if ( hvm_funcs.vmtrace_get_pt_offset )
> +        return hvm_funcs.vmtrace_get_pt_offset(v, offset);
> +
> +    return -EOPNOTSUPP;
> +}
> +
>  /*
>   * This must be defined as a macro instead of an inline function,
>   * because it uses 'struct vcpu' and 'struct domain' which have
> diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
> index 0e9a0b8de6..64c0d82614 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -186,6 +186,9 @@ struct vmx_vcpu {
>       * pCPU and wakeup the related vCPU.
>       */
>      struct pi_blocking_vcpu pi_blocking;
> +
> +    /* State of processor trace feature */
> +    struct pt_state      *pt_state;

I think it's fine to add this here for now, but we might also consider
putting it outside of an HVM-specific structure if it's to be used by
PV guests. Since all of this is HVM-specific, I'm fine with adding it
here.

>  };
>  
>  int vmx_create_vmcs(struct vcpu *v);
> diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
> index 111ccd7e61..be7213d3c0 100644
> --- a/xen/include/asm-x86/hvm/vmx/vmx.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmx.h
> @@ -689,4 +689,18 @@ typedef union ldt_or_tr_instr_info {
>      };
>  } ldt_or_tr_instr_info_t;
>  
> +/* Processor Trace state per vCPU */
> +struct pt_state {

Please use ipt_state here, since this is an Intel-specific structure.

> +    bool_t active;

Plain bool.

Thanks, Roger.

Patch

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ab19d9424e..db3f051b40 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -508,11 +508,24 @@  static void vmx_restore_host_msrs(void)
 
 static void vmx_save_guest_msrs(struct vcpu *v)
 {
+    uint64_t rtit_ctl;
+
     /*
      * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
      * be updated at any time via SWAPGS, which we cannot trap.
      */
     v->arch.hvm.vmx.shadow_gs = rdgsshadow();
+
+    if ( unlikely(v->arch.hvm.vmx.pt_state &&
+                  v->arch.hvm.vmx.pt_state->active) )
+    {
+        rdmsrl(MSR_RTIT_CTL, rtit_ctl);
+        BUG_ON(rtit_ctl & RTIT_CTL_TRACEEN);
+
+        rdmsrl(MSR_RTIT_STATUS, v->arch.hvm.vmx.pt_state->status);
+        rdmsrl(MSR_RTIT_OUTPUT_MASK,
+               v->arch.hvm.vmx.pt_state->output_mask.raw);
+    }
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
@@ -524,6 +537,17 @@  static void vmx_restore_guest_msrs(struct vcpu *v)
 
     if ( cpu_has_msr_tsc_aux )
         wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
+
+    if ( unlikely(v->arch.hvm.vmx.pt_state &&
+                  v->arch.hvm.vmx.pt_state->active) )
+    {
+        wrmsrl(MSR_RTIT_OUTPUT_BASE,
+               v->arch.hvm.vmx.pt_state->output_base);
+        wrmsrl(MSR_RTIT_OUTPUT_MASK,
+               v->arch.hvm.vmx.pt_state->output_mask.raw);
+        wrmsrl(MSR_RTIT_STATUS,
+               v->arch.hvm.vmx.pt_state->status);
+    }
 }
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
@@ -2240,6 +2264,60 @@  static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
     return true;
 }
 
+static int vmx_init_pt(struct vcpu *v)
+{
+    v->arch.hvm.vmx.pt_state = xzalloc(struct pt_state);
+
+    if ( !v->arch.hvm.vmx.pt_state )
+        return -EFAULT;
+
+    if ( !v->arch.vmtrace.pt_buf )
+        return -EINVAL;
+
+    if ( !v->domain->vmtrace_pt_size )
+	return -EINVAL;
+
+    v->arch.hvm.vmx.pt_state->output_base = page_to_maddr(v->arch.vmtrace.pt_buf);
+    v->arch.hvm.vmx.pt_state->output_mask.raw = v->domain->vmtrace_pt_size - 1;
+
+    if ( vmx_add_host_load_msr(v, MSR_RTIT_CTL, 0) )
+        return -EFAULT;
+
+    if ( vmx_add_guest_msr(v, MSR_RTIT_CTL,
+                              RTIT_CTL_TRACEEN | RTIT_CTL_OS |
+                              RTIT_CTL_USR | RTIT_CTL_BRANCH_EN) )
+        return -EFAULT;
+
+    return 0;
+}
+
+static int vmx_destroy_pt(struct vcpu* v)
+{
+    if ( v->arch.hvm.vmx.pt_state )
+        xfree(v->arch.hvm.vmx.pt_state);
+
+    v->arch.hvm.vmx.pt_state = NULL;
+    return 0;
+}
+
+static int vmx_control_pt(struct vcpu *v, bool_t enable)
+{
+    if ( !v->arch.hvm.vmx.pt_state )
+        return -EINVAL;
+
+    v->arch.hvm.vmx.pt_state->active = enable;
+    return 0;
+}
+
+static int vmx_get_pt_offset(struct vcpu *v, uint64_t *offset)
+{
+    if ( !v->arch.hvm.vmx.pt_state )
+        return -EINVAL;
+
+    *offset = v->arch.hvm.vmx.pt_state->output_mask.offset;
+    return 0;
+}
+
 static struct hvm_function_table __initdata vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -2295,6 +2373,10 @@  static struct hvm_function_table __initdata vmx_function_table = {
     .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
     .altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
     .altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
+    .vmtrace_init_pt = vmx_init_pt,
+    .vmtrace_destroy_pt = vmx_destroy_pt,
+    .vmtrace_control_pt = vmx_control_pt,
+    .vmtrace_get_pt_offset = vmx_get_pt_offset,
     .tsc_scaling = {
         .max_ratio = VMX_TSC_MULTIPLIER_MAX,
     },
@@ -3674,6 +3756,13 @@  void vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     hvm_invalidate_regs_fields(regs);
 
+    if ( unlikely(v->arch.hvm.vmx.pt_state &&
+                  v->arch.hvm.vmx.pt_state->active) )
+    {
+        rdmsrl(MSR_RTIT_OUTPUT_MASK,
+               v->arch.hvm.vmx.pt_state->output_mask.raw);
+    }
+
     if ( paging_mode_hap(v->domain) )
     {
         /*
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1eb377dd82..8f194889e5 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -214,6 +214,12 @@  struct hvm_function_table {
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);
 
+    /* vmtrace */
+    int (*vmtrace_init_pt)(struct vcpu *v);
+    int (*vmtrace_destroy_pt)(struct vcpu *v);
+    int (*vmtrace_control_pt)(struct vcpu *v, bool_t enable);
+    int (*vmtrace_get_pt_offset)(struct vcpu *v, uint64_t *offset);
+
     /*
      * Parameters and callbacks for hardware-assisted TSC scaling,
      * which are valid only when the hardware feature is available.
@@ -655,6 +661,38 @@  static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
     return false;
 }
 
+static inline int vmtrace_init_pt(struct vcpu *v)
+{
+    if ( hvm_funcs.vmtrace_init_pt )
+        return hvm_funcs.vmtrace_init_pt(v);
+
+    return -EOPNOTSUPP;
+}
+
+static inline int vmtrace_destroy_pt(struct vcpu *v)
+{
+    if ( hvm_funcs.vmtrace_destroy_pt )
+        return hvm_funcs.vmtrace_destroy_pt(v);
+
+    return -EOPNOTSUPP;
+}
+
+static inline int vmtrace_control_pt(struct vcpu *v, bool_t enable)
+{
+    if ( hvm_funcs.vmtrace_control_pt )
+        return hvm_funcs.vmtrace_control_pt(v, enable);
+
+    return -EOPNOTSUPP;
+}
+
+static inline int vmtrace_get_pt_offset(struct vcpu *v, uint64_t *offset)
+{
+    if ( hvm_funcs.vmtrace_get_pt_offset )
+        return hvm_funcs.vmtrace_get_pt_offset(v, offset);
+
+    return -EOPNOTSUPP;
+}
+
 /*
  * This must be defined as a macro instead of an inline function,
  * because it uses 'struct vcpu' and 'struct domain' which have
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 0e9a0b8de6..64c0d82614 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -186,6 +186,9 @@  struct vmx_vcpu {
      * pCPU and wakeup the related vCPU.
      */
     struct pi_blocking_vcpu pi_blocking;
+
+    /* State of processor trace feature */
+    struct pt_state      *pt_state;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 111ccd7e61..be7213d3c0 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -689,4 +689,18 @@  typedef union ldt_or_tr_instr_info {
     };
 } ldt_or_tr_instr_info_t;
 
+/* Processor Trace state per vCPU */
+struct pt_state {
+    bool_t active;
+    uint64_t status;
+    uint64_t output_base;
+    union {
+        uint64_t raw;
+        struct {
+            uint32_t size;
+            uint32_t offset;
+        };
+    } output_mask;
+};
+
 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */