
[v1,4/5] x86/msr: introduce guest_rdmsr()

Message ID 20170830103433.6605-5-sergey.dyasli@citrix.com (mailing list archive)
State New, archived

Commit Message

Sergey Dyasli Aug. 30, 2017, 10:34 a.m. UTC
The new function is responsible for handling RDMSR from both HVM and PV
guests. Currently it handles only 2 MSRs:

    MSR_INTEL_PLATFORM_INFO
    MSR_INTEL_MISC_FEATURES_ENABLES

Its behaviour differs from the old MSR handlers: if an MSR is handled by
guest_rdmsr(), RDMSR will either succeed (if the guest is allowed to
access it based on its MSR policy) or produce a #GP fault. A guest will
never see the H/W value of an MSR that is unknown to this function.

guest_rdmsr() unifies and replaces the handling code from
vmx_msr_read_intercept() and priv_op_read_msr().

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/hvm.c         |  7 ++++++-
 xen/arch/x86/hvm/vmx/vmx.c     | 10 ----------
 xen/arch/x86/msr.c             | 31 +++++++++++++++++++++++++++++++
 xen/arch/x86/pv/emul-priv-op.c | 22 ++++------------------
 xen/include/asm-x86/msr.h      |  8 ++++++++
 5 files changed, 49 insertions(+), 29 deletions(-)
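
A minimal sketch of the caller pattern this introduces (not part of the
patch; apart from guest_rdmsr() and the X86EMUL_* constants, the names are
made up for illustration):

    /* Illustration only: X86EMUL_OKAY and X86EMUL_EXCEPTION are final
     * answers from the new MSR infrastructure, while X86EMUL_UNHANDLEABLE
     * means "fall back to the legacy handler" until everything is moved. */
    static int example_msr_read(struct vcpu *v, unsigned int msr,
                                uint64_t *msr_content)
    {
        int ret = guest_rdmsr(v, msr, msr_content);

        if ( ret != X86EMUL_UNHANDLEABLE )
            return ret;       /* OKAY, or EXCEPTION so the caller raises #GP */

        /* Legacy vmx_msr_read_intercept()/priv_op_read_msr() style handling
         * for MSRs not yet known to guest_rdmsr(). */
        return legacy_msr_read(v, msr, msr_content);  /* hypothetical helper */
    }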

Comments

Andrew Cooper Aug. 30, 2017, 10:43 a.m. UTC | #1
On 30/08/17 11:34, Sergey Dyasli wrote:
> The new function is responsible for handling RDMSR from both HVM and PV
> guests. Currently it handles only 2 MSRs:
>
>     MSR_INTEL_PLATFORM_INFO
>     MSR_INTEL_MISC_FEATURES_ENABLES
>
> Its behaviour differs from the old MSR handlers: if an MSR is handled by
> guest_rdmsr(), RDMSR will either succeed (if the guest is allowed to
> access it based on its MSR policy) or produce a #GP fault. A guest will
> never see the H/W value of an MSR that is unknown to this function.
>
> guest_rdmsr() unifies and replaces the handling code from
> vmx_msr_read_intercept() and priv_op_read_msr().
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>

You should also note that this (along with the prep work in
init_domain_msr_policy()) fixes a bug where dom0 could probe and find
CPUID faulting, even though it couldn't actually use it.

~Andrew
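
To make the probe-vs-use mismatch concrete, compare the two read paths for
MSR_INTEL_PLATFORM_INFO (both taken from the hunks below; shown here purely
for illustration, with the surrounding code elided):

    /* Old PV path (removed by this patch): reports the host capability,
     * so dom0 could probe and find CPUID faulting even when nothing would
     * let it actually enable the feature. */
    if ( this_cpu(cpuid_faulting_enabled) )
        *val |= MSR_PLATFORM_INFO_CPUID_FAULTING;

    /* New path: reports whatever the domain's MSR policy grants, which the
     * (earlier) init_domain_msr_policy() prep work is assumed to have set
     * consistently with what the domain can really use. */
    *val = (uint64_t) dp->plaform_info.cpuid_faulting <<
               _MSR_PLATFORM_INFO_CPUID_FAULTING;
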
Tian, Kevin Sept. 21, 2017, 1:53 a.m. UTC | #2
> From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> Sent: Wednesday, August 30, 2017 6:35 PM
> 
> The new function is responsible for handling RDMSR from both HVM and PV
> guests. Currently it handles only 2 MSRs:
> 
>     MSR_INTEL_PLATFORM_INFO
>     MSR_INTEL_MISC_FEATURES_ENABLES
> 
> Its behaviour differs from the old MSR handlers: if an MSR is handled by
> guest_rdmsr(), RDMSR will either succeed (if the guest is allowed to
> access it based on its MSR policy) or produce a #GP fault. A guest will
> never see the H/W value of an MSR that is unknown to this function.
> 
> guest_rdmsr() unifies and replaces the handling code from
> vmx_msr_read_intercept() and priv_op_read_msr().
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>, with a small comment:

> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3334,11 +3334,16 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      struct vcpu *v = current;
>      struct domain *d = v->domain;
>      uint64_t *var_range_base, *fixed_range_base;
> -    int ret = X86EMUL_OKAY;
> +    int ret;
> 
>      var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
>      fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
> 
> +    if ( (ret = guest_rdmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
> +        return ret;
> +    else
> +        ret = X86EMUL_OKAY;
> +

no need to add 'else' here.
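
The suggested simplification would make that hunk read (a sketch of the
reviewer's point only, not a tested change):

    if ( (ret = guest_rdmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
        return ret;

    /* guest_rdmsr() punted on this MSR, so fall through to the legacy
     * switch below with ret reset to its old default. */
    ret = X86EMUL_OKAY;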

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2ad07d52bc..ec7205ee32 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3334,11 +3334,16 @@  int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     struct vcpu *v = current;
     struct domain *d = v->domain;
     uint64_t *var_range_base, *fixed_range_base;
-    int ret = X86EMUL_OKAY;
+    int ret;
 
     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
     fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;
 
+    if ( (ret = guest_rdmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
+        return ret;
+    else
+        ret = X86EMUL_OKAY;
+
     switch ( msr )
     {
         unsigned int index;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 155fba9017..ac34383658 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2896,16 +2896,6 @@  static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
             goto gp_fault;
         break;
 
-    case MSR_INTEL_PLATFORM_INFO:
-        *msr_content = MSR_PLATFORM_INFO_CPUID_FAULTING;
-        break;
-
-    case MSR_INTEL_MISC_FEATURES_ENABLES:
-        *msr_content = 0;
-        if ( current->arch.msr->misc_features_enables.cpuid_faulting )
-            *msr_content |= MSR_MISC_FEATURES_CPUID_FAULTING;
-        break;
-
     default:
         if ( passive_domain_do_rdmsr(msr, msr_content) )
             goto done;
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index b5ad97d3c8..a822a132ad 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -117,6 +117,37 @@  int init_vcpu_msr_policy(struct vcpu *v)
     return 0;
 }
 
+int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+{
+    const struct msr_domain_policy *dp = v->domain->arch.msr;
+    const struct msr_vcpu_policy *vp = v->arch.msr;
+
+    switch ( msr )
+    {
+    case MSR_INTEL_PLATFORM_INFO:
+        if ( !dp->plaform_info.available )
+            goto gp_fault;
+        *val = (uint64_t) dp->plaform_info.cpuid_faulting <<
+                   _MSR_PLATFORM_INFO_CPUID_FAULTING;
+        break;
+
+    case MSR_INTEL_MISC_FEATURES_ENABLES:
+        if ( !vp->misc_features_enables.available )
+            goto gp_fault;
+        *val = (uint64_t) vp->misc_features_enables.cpuid_faulting <<
+                   _MSR_MISC_FEATURES_CPUID_FAULTING;
+        break;
+
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    return X86EMUL_OKAY;
+
+ gp_fault:
+    return X86EMUL_EXCEPTION;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 66cda538fc..d563214fc4 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -834,6 +834,10 @@  static int priv_op_read_msr(unsigned int reg, uint64_t *val,
     const struct vcpu *curr = current;
     const struct domain *currd = curr->domain;
     bool vpmu_msr = false;
+    int ret;
+
+    if ( (ret = guest_rdmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE )
+        return ret;
 
     switch ( reg )
     {
@@ -934,24 +938,6 @@  static int priv_op_read_msr(unsigned int reg, uint64_t *val,
         *val = 0;
         return X86EMUL_OKAY;
 
-    case MSR_INTEL_PLATFORM_INFO:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             rdmsr_safe(MSR_INTEL_PLATFORM_INFO, *val) )
-            break;
-        *val = 0;
-        if ( this_cpu(cpuid_faulting_enabled) )
-            *val |= MSR_PLATFORM_INFO_CPUID_FAULTING;
-        return X86EMUL_OKAY;
-
-    case MSR_INTEL_MISC_FEATURES_ENABLES:
-        if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
-             rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, *val) )
-            break;
-        *val = 0;
-        if ( curr->arch.msr->misc_features_enables.cpuid_faulting )
-            *val |= MSR_MISC_FEATURES_CPUID_FAULTING;
-        return X86EMUL_OKAY;
-
     case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
     case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
     case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index 7c8395b9b3..9cc505cb40 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -226,6 +226,14 @@  void init_guest_msr_policy(void);
 int init_domain_msr_policy(struct domain *d);
 int init_vcpu_msr_policy(struct vcpu *v);
 
+/*
+ * The below functions can return X86EMUL_UNHANDLEABLE, which means that the
+ * MSR is not (yet) handled by them and must be processed by the legacy
+ * handlers. Such behaviour is needed for the transition period until all
+ * rdmsr/wrmsr are handled by the new MSR infrastructure.
+ */
+int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_MSR_H */