[v2,1/3] x86/hvm: Don't raise #GP behind the emulator's back for MSR accesses

Message ID 1487586529-27092-2-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper Feb. 20, 2017, 10:28 a.m. UTC
The current hvm_msr_{read,write}_intercept() infrastructure calls
hvm_inject_hw_exception() directly to latch a fault, and returns
X86EMUL_EXCEPTION to its caller.

This behaviour is problematic for the hvmemul_{read,write}_msr() paths, as the
fault is raised behind the back of the x86 emulator.

Alter the behaviour so hvm_msr_{read,write}_intercept() simply returns
X86EMUL_EXCEPTION, leaving the callers to actually inject the #GP fault.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>

v2:
 * Substantial rebase
 * Introduce __must_check for hvm_msr_{read,write}_intercept()
---
 xen/arch/x86/hvm/emulate.c        | 14 ++++++++++++--
 xen/arch/x86/hvm/hvm.c            |  7 ++++---
 xen/arch/x86/hvm/svm/svm.c        |  4 ++--
 xen/arch/x86/hvm/vmx/vmx.c        | 23 ++++++++++++++++++-----
 xen/arch/x86/hvm/vmx/vvmx.c       | 19 ++++++++++++++-----
 xen/include/asm-x86/hvm/support.h | 12 +++++++++---
 6 files changed, 59 insertions(+), 20 deletions(-)
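
To make the new contract concrete, the sketch below shows the two caller
patterns the patch establishes. The stub definitions stand in for the real
Xen ones (only the control flow is the point; the 0xdead MSR is hypothetical):
non-emulator callers inject the #GP themselves, while emulator callbacks
latch it into the emulation context so the x86 emulator decides delivery.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the real Xen definitions; values match x86_emulate.h. */
#define X86EMUL_OKAY      0
#define X86EMUL_EXCEPTION 2
#define TRAP_gp_fault     13

struct x86_emulate_ctxt { int pending_vector; };

/* Stub: the real handler decodes the MSR and may fail. */
static int hvm_msr_read_intercept(unsigned int msr, uint64_t *val)
{
    if ( msr == 0xdead )              /* hypothetical unhandled MSR */
        return X86EMUL_EXCEPTION;
    *val = 0;
    return X86EMUL_OKAY;
}

static void hvm_inject_hw_exception(unsigned int vector, int ec)
{
    printf("inject vector %u ec %d directly\n", vector, ec);
}

static void x86_emul_hw_exception(unsigned int vector, int ec,
                                  struct x86_emulate_ctxt *ctxt)
{
    ctxt->pending_vector = vector;    /* latched; emulator delivers it */
    (void)ec;
}

/* Pattern 1: non-emulator callers (VM-exit handlers) inject #GP directly. */
static void vmexit_style_caller(unsigned int msr)
{
    uint64_t val;

    if ( hvm_msr_read_intercept(msr, &val) == X86EMUL_EXCEPTION )
        hvm_inject_hw_exception(TRAP_gp_fault, 0);
}

/* Pattern 2: emulator callbacks latch the fault into the context instead. */
static int emulator_style_caller(unsigned int msr, uint64_t *val,
                                 struct x86_emulate_ctxt *ctxt)
{
    int rc = hvm_msr_read_intercept(msr, val);

    if ( rc == X86EMUL_EXCEPTION )
        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);

    return rc;
}

int main(void)
{
    struct x86_emulate_ctxt ctxt = { 0 };
    uint64_t val;

    vmexit_style_caller(0xdead);
    emulator_style_caller(0xdead, &val, &ctxt);
    printf("emulator latched vector %d\n", ctxt.pending_vector);
    return 0;
}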

Comments

Paul Durrant Feb. 20, 2017, 10:34 a.m. UTC | #1
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: 20 February 2017 10:29
> To: Xen-devel <xen-devel@lists.xen.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; Boris Ostrovsky <boris.ostrovsky@oracle.com>;
> Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Subject: [PATCH v2 1/3] x86/hvm: Don't raise #GP behind the emulator's back
> for MSR accesses
> 
> [...]

hvm/emulate.c changes...

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
 
Boris Ostrovsky Feb. 21, 2017, 1:46 p.m. UTC | #2
On 02/20/2017 05:28 AM, Andrew Cooper wrote:
> [...]
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 894c457..b864535 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1744,7 +1744,6 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      return X86EMUL_OKAY;
>  
>   gpf:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }
>  
> @@ -1897,7 +1896,6 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>      return result;
>  
>   gpf:
> -    hvm_inject_hw_exception(TRAP_gp_fault, 0);
>      return X86EMUL_EXCEPTION;
>  }

The label can be dropped with a direct return instead of a 'goto'.
Either way

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
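
A minimal sketch of the shape Boris suggests, using a hypothetical MSR case
purely for illustration:

#include <stdint.h>

#define X86EMUL_OKAY      0
#define X86EMUL_EXCEPTION 2

/* Current shape: the failure path funnels through a 'gpf' label. */
static int msr_read_with_label(unsigned int msr, uint64_t *val)
{
    switch ( msr )
    {
    case 0x10:                /* hypothetical handled MSR */
        *val = 0;
        break;
    default:
        goto gpf;
    }

    return X86EMUL_OKAY;

 gpf:
    return X86EMUL_EXCEPTION;
}

/* Suggested shape: return directly and drop the label. */
static int msr_read_direct(unsigned int msr, uint64_t *val)
{
    switch ( msr )
    {
    case 0x10:                /* hypothetical handled MSR */
        *val = 0;
        break;
    default:
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

int main(void)
{
    uint64_t v;

    return msr_read_with_label(0x10, &v) | msr_read_direct(0x10, &v);
}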
Andrew Cooper Feb. 21, 2017, 1:50 p.m. UTC | #3
On 21/02/17 13:46, Boris Ostrovsky wrote:
>> [...]
> The label can be dropped with a direct return instead of a 'goto'.
> Either way

I will fold cleanup like that into the start of the MSR levelling work,
which will bring other changes to these functions as well.

~Andrew
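
As background on the __must_check annotation added in the header below: Xen's
compiler.h maps it to GCC's warn_unused_result attribute, so a caller that
ignores the return value now gets a compile-time warning. A minimal sketch
(the function name and MSR number are hypothetical):

#include <stdint.h>

/* Xen's compiler.h defines __must_check along these lines. */
#define __must_check __attribute__((__warn_unused_result__))

static int __must_check msr_read(unsigned int msr, uint64_t *val)
{
    (void)msr;
    *val = 0;
    return 0;
}

int main(void)
{
    uint64_t val;

    msr_read(0x10, &val);                 /* -Wunused-result warning here */

    if ( msr_read(0x10, &val) != 0 )      /* fine: the result is checked */
        return 1;
    return 0;
}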

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 14f9b43..edcae5e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1544,7 +1544,12 @@  static int hvmemul_read_msr(
     uint64_t *val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_read_intercept(reg, val);
+    int rc = hvm_msr_read_intercept(reg, val);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }
 
 static int hvmemul_write_msr(
@@ -1552,7 +1557,12 @@  static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_write_intercept(reg, val, 1);
+    int rc = hvm_msr_write_intercept(reg, val, 1);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }
 
 static int hvmemul_wbinvd(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6621d62..08855c2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -518,7 +518,10 @@  void hvm_do_resume(struct vcpu *v)
 
         if ( w->do_write.msr )
         {
-            hvm_msr_write_intercept(w->msr, w->value, 0);
+            if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+                 X86EMUL_EXCEPTION )
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
             w->do_write.msr = 0;
         }
 
@@ -3455,7 +3458,6 @@  int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return ret;
 
  gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
@@ -3600,7 +3602,6 @@  int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     return ret;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 894c457..b864535 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1744,7 +1744,6 @@  static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return X86EMUL_OKAY;
 
  gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1897,7 +1896,6 @@  static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return result;
 
  gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1924,6 +1922,8 @@  static void svm_do_msr_access(struct cpu_user_regs *regs)
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
+    else if ( rc == X86EMUL_EXCEPTION )
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
 }
 
 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 597d7ac..b5bfa05 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2734,7 +2734,6 @@  static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -2971,7 +2970,6 @@  static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     return X86EMUL_OKAY;
 
 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
 
@@ -3664,18 +3662,33 @@  void vmx_vmexit_handler(struct cpu_user_regs *regs)
         break;
     case EXIT_REASON_MSR_READ:
     {
-        uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
+        uint64_t msr_content = 0;
+
+        switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
         {
+        case X86EMUL_OKAY:
             msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
         }
         break;
     }
 
     case EXIT_REASON_MSR_WRITE:
-        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
+        switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
+        {
+        case X86EMUL_OKAY:
             update_guest_eip(); /* Safe: WRMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
+        }
         break;
 
     case EXIT_REASON_VMXOFF:
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index f6a25a6..c830d16 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1032,6 +1032,7 @@  static void load_shadow_guest_state(struct vcpu *v)
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     u32 control;
     u64 cr_gh_mask, cr_read_shadow;
+    int rc;
 
     static const u16 vmentry_fields[] = {
         VM_ENTRY_INTR_INFO,
@@ -1053,8 +1054,12 @@  static void load_shadow_guest_state(struct vcpu *v)
     if ( control & VM_ENTRY_LOAD_GUEST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT));
     if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
@@ -1222,7 +1227,7 @@  static void sync_vvmcs_ro(struct vcpu *v)
 
 static void load_vvmcs_host_state(struct vcpu *v)
 {
-    int i;
+    int i, rc;
     u64 r;
     u32 control;
 
@@ -1240,8 +1245,12 @@  static void load_vvmcs_host_state(struct vcpu *v)
     if ( control & VM_EXIT_LOAD_HOST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT));
     if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }
 
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 262955d..5e25698 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -121,13 +121,19 @@  int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value, bool_t may_defer);
 int hvm_set_cr3(unsigned long value, bool_t may_defer);
 int hvm_set_cr4(unsigned long value, bool_t may_defer);
-int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(
-    unsigned int msr, uint64_t msr_content, bool_t may_defer);
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 void hvm_ud_intercept(struct cpu_user_regs *);
 
+/*
+ * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
+ * injecting a #GP fault.  Used to support speculative reads.
+ */
+int __must_check hvm_msr_read_intercept(
+    unsigned int msr, uint64_t *msr_content);
+int __must_check hvm_msr_write_intercept(
+    unsigned int msr, uint64_t msr_content, bool_t may_defer);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
 
 /*