[v7,6/7] KVM: x86: Load Guest fpu state when accessing MSRs managed by XSAVES

Message ID 20190927021927.23057-7-weijiang.yang@intel.com (mailing list archive)
State New, archived
Series Introduce support for Guest CET feature

Commit Message

Yang, Weijiang Sept. 27, 2019, 2:19 a.m. UTC
From: Sean Christopherson <sean.j.christopherson@intel.com>

A handful of CET MSRs are not context switched through "traditional"
methods, e.g. VMCS or manual switching, but rather are passed through
to the guest and are saved and restored by XSAVES/XRSTORS, i.e. the
guest's FPU state.

Load the guest's FPU state if userspace is accessing MSRs whose values
are managed by XSAVES so that the MSR helper, e.g. vmx_{get,set}_msr(),
can simply do {RD,WR}MSR to access the guest's value.

Note that guest_cpuid_has() is not queried as host userspace is allowed
to access MSRs that have not been exposed to the guest, e.g. it might do
KVM_SET_MSRS prior to KVM_SET_CPUID2.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Co-developed-by: Yang Weijiang <weijiang.yang@intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/x86.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
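
For context, once __msr_io() has loaded the guest's FPU state (and with it the guest's XSAVES-managed register values), an MSR helper can reach the guest's value with a plain RDMSR or WRMSR. A minimal sketch of that pattern, using a hypothetical helper name (the actual vmx_{get,set}_msr() changes live in a later patch of this series):

/*
 * Sketch only, not the series' exact code: with the guest FPU loaded
 * via XRSTORS, the CET MSRs hold the guest's values, so reading the
 * hardware MSR returns the guest state directly.
 */
static int get_xsaves_managed_msr(struct kvm_vcpu *vcpu,
				  struct msr_data *msr_info)
{
	switch (msr_info->index) {
	case MSR_IA32_U_CET:
	case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
		/* Caller (__msr_io()) has already loaded the guest FPU. */
		rdmsrl(msr_info->index, msr_info->data);
		return 0;
	default:
		return 1;	/* not an XSAVES-managed MSR */
	}
}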

Comments

Jim Mattson Oct. 2, 2019, 7:56 p.m. UTC | #1
On Thu, Sep 26, 2019 at 7:17 PM Yang Weijiang <weijiang.yang@intel.com> wrote:
>
> From: Sean Christopherson <sean.j.christopherson@intel.com>
>
> A handful of CET MSRs are not context switched through "traditional"
> methods, e.g. VMCS or manual switching, but rather are passed through
> to the guest and are saved and restored by XSAVES/XRSTORS, i.e. the
> guest's FPU state.
>
> Load the guest's FPU state if userspace is accessing MSRs whose values
> are managed by XSAVES so that the MSR helper, e.g. vmx_{get,set}_msr(),
> can simply do {RD,WR}MSR to access the guest's value.
>
> Note that guest_cpuid_has() is not queried as host userspace is allowed
> to access MSRs that have not been exposed to the guest, e.g. it might do
> KVM_SET_MSRS prior to KVM_SET_CPUID2.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Co-developed-by: Yang Weijiang <weijiang.yang@intel.com>
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>  arch/x86/kvm/x86.c | 22 +++++++++++++++++++++-
>  1 file changed, 21 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 290c3c3efb87..5b8116028a59 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -104,6 +104,8 @@ static void enter_smm(struct kvm_vcpu *vcpu);
>  static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
>  static void store_regs(struct kvm_vcpu *vcpu);
>  static int sync_regs(struct kvm_vcpu *vcpu);
> +static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
> +static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
>
>  struct kvm_x86_ops *kvm_x86_ops __read_mostly;
>  EXPORT_SYMBOL_GPL(kvm_x86_ops);
> @@ -2999,6 +3001,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  }
>  EXPORT_SYMBOL_GPL(kvm_get_msr_common);
>
> +static bool is_xsaves_msr(u32 index)
> +{
> +       return index == MSR_IA32_U_CET ||
> +              (index >= MSR_IA32_PL0_SSP && index <= MSR_IA32_PL3_SSP);
> +}
> +
>  /*
>   * Read or write a bunch of msrs. All parameters are kernel addresses.
>   *
> @@ -3009,11 +3017,23 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
>                     int (*do_msr)(struct kvm_vcpu *vcpu,
>                                   unsigned index, u64 *data))
>  {
> +       bool fpu_loaded = false;
>         int i;
> +       const u64 cet_bits = XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL;
> +       bool cet_xss = kvm_x86_ops->xsaves_supported() &&
> +                      (kvm_supported_xss() & cet_bits);

It seems like I've seen a lot of checks like this. Can this be
simplified (throughout this series) by sinking the
kvm_x86_ops->xsaves_supported() check into kvm_supported_xss()? That
is, shouldn't kvm_supported_xss() return 0 if
kvm_x86_ops->xsaves_supported() is false?
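
A minimal sketch of the suggested consolidation (the existing body of kvm_supported_xss() is not shown in this patch, so its return expression below is an assumption):

static u64 kvm_supported_xss(void)
{
	if (!kvm_x86_ops->xsaves_supported())
		return 0;

	return host_xss & KVM_SUPPORTED_XSS;
}

With that, the caller above would reduce to:

	bool cet_xss = kvm_supported_xss() & cet_bits;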

> -       for (i = 0; i < msrs->nmsrs; ++i)
> +       for (i = 0; i < msrs->nmsrs; ++i) {
> +               if (!fpu_loaded && cet_xss &&
> +                   is_xsaves_msr(entries[i].index)) {
> +                       kvm_load_guest_fpu(vcpu);
> +                       fpu_loaded = true;
> +               }
>                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
>                         break;
> +       }
> +       if (fpu_loaded)
> +               kvm_put_guest_fpu(vcpu);
>
>         return i;
>  }
> --
> 2.17.2
>
Yang, Weijiang Oct. 9, 2019, 6:46 a.m. UTC | #2
On Wed, Oct 02, 2019 at 12:56:30PM -0700, Jim Mattson wrote:
> On Thu, Sep 26, 2019 at 7:17 PM Yang Weijiang <weijiang.yang@intel.com> wrote:
> >
> > From: Sean Christopherson <sean.j.christopherson@intel.com>
> >
> >  /*
> >   * Read or write a bunch of msrs. All parameters are kernel addresses.
> >   *
> > @@ -3009,11 +3017,23 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
> >                     int (*do_msr)(struct kvm_vcpu *vcpu,
> >                                   unsigned index, u64 *data))
> >  {
> > +       bool fpu_loaded = false;
> >         int i;
> > +       const u64 cet_bits = XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL;
> > +       bool cet_xss = kvm_x86_ops->xsaves_supported() &&
> > +                      (kvm_supported_xss() & cet_bits);
> 
> It seems like I've seen a lot of checks like this. Can this be
> simplified (throughout this series) by sinking the
> kvm_x86_ops->xsaves_supported() check into kvm_supported_xss()? That
> is, shouldn't kvm_supported_xss() return 0 if
> kvm_x86_ops->xsaves_supported() is false?
>
OK, let me add this check, thank you!

> > -       for (i = 0; i < msrs->nmsrs; ++i)
> > +       for (i = 0; i < msrs->nmsrs; ++i) {
> > +               if (!fpu_loaded && cet_xss &&
> > +                   is_xsaves_msr(entries[i].index)) {
> > +                       kvm_load_guest_fpu(vcpu);
> > +                       fpu_loaded = true;
> > +               }
> >                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
> >                         break;
> > +       }
> > +       if (fpu_loaded)
> > +               kvm_put_guest_fpu(vcpu);
> >
> >         return i;
> >  }
> > --
> > 2.17.2
> >

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 290c3c3efb87..5b8116028a59 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -104,6 +104,8 @@ static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
 static int sync_regs(struct kvm_vcpu *vcpu);
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -2999,6 +3001,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
 
+static bool is_xsaves_msr(u32 index)
+{
+	return index == MSR_IA32_U_CET ||
+	       (index >= MSR_IA32_PL0_SSP && index <= MSR_IA32_PL3_SSP);
+}
+
 /*
  * Read or write a bunch of msrs. All parameters are kernel addresses.
  *
@@ -3009,11 +3017,23 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 		    int (*do_msr)(struct kvm_vcpu *vcpu,
 				  unsigned index, u64 *data))
 {
+	bool fpu_loaded = false;
 	int i;
+	const u64 cet_bits = XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL;
+	bool cet_xss = kvm_x86_ops->xsaves_supported() &&
+		       (kvm_supported_xss() & cet_bits);
 
-	for (i = 0; i < msrs->nmsrs; ++i)
+	for (i = 0; i < msrs->nmsrs; ++i) {
+		if (!fpu_loaded && cet_xss &&
+		    is_xsaves_msr(entries[i].index)) {
+			kvm_load_guest_fpu(vcpu);
+			fpu_loaded = true;
+		}
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
+	}
+	if (fpu_loaded)
+		kvm_put_guest_fpu(vcpu);
 
 	return i;
 }
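
To illustrate the path this patch covers: KVM_GET_MSRS and KVM_SET_MSRS from userspace land in __msr_io(), which now loads the guest FPU before touching the XSAVES-managed MSRs. A hypothetical userspace snippet reading MSR_IA32_U_CET (architecturally MSR index 0x6a0) through that ioctl, assuming vcpu_fd is an already-created vCPU file descriptor:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

#define MSR_IA32_U_CET	0x6a0	/* architectural MSR index */

static int read_guest_u_cet(int vcpu_fd, __u64 *value)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 1;
	msrs.entry.index = MSR_IA32_U_CET;

	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
		return -1;

	*value = msrs.entry.data;
	return 0;
}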