[v2,10/29] KVM: nVMX: Capture VM-Fail to a local var in nested_vmx_check_vmentry_hw()

Message ID 20190124175845.15926-11-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: VMX: Move vCPU-run to proper asm sub-routine

Commit Message

Sean Christopherson Jan. 24, 2019, 5:58 p.m. UTC
Unlike the primary vCPU-run flow, the nested early checks code doesn't
actually want to propagate VM-Fail back to 'vmx'.  Yay copy+paste.

In addition to eliminating the need to clear vmx->fail before
returning, using a local boolean also drops a reference to 'vmx' in the
asm blob.  Dropping the reference to 'vmx' will save a register in the
long run, as future patches will shift all pointer references from 'vmx'
to 'vmx->loaded_vmcs'.

Fixes: 52017608da33 ("KVM: nVMX: add option to perform early consistency checks via H/W")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/nested.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
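
To see why this helps, consider a minimal user-space sketch of the two
patterns (x86-64 GCC/Clang extended asm; toy struct and names, not the
kernel code).  In the first, SETBE stores through the struct pointer, so
the pointer must stay live in a register across the asm blob; in the
second, the flag lands directly in a register of the compiler's choosing:

  #include <stdbool.h>
  #include <stddef.h>

  struct toy {
  	bool fail;
  };

  /* Old pattern: SETBE writes through the pointer, pinning it in a
   * register for the duration of the asm blob. */
  static bool old_pattern(struct toy *t, unsigned long a, unsigned long b)
  {
  	asm("cmp %[b], %[a]\n\t"
  	    "setbe %c[fail](%[t])"
  	    : "+m"(t->fail)
  	    : [t]"r"(t), [a]"r"(a), [b]"r"(b),
  	      [fail]"i"(offsetof(struct toy, fail))
  	    : "cc");
  	return t->fail;
  }

  /* New pattern: the flag goes straight to a local; no pointer, and
   * thus no dedicated register, is needed inside the asm. */
  static bool new_pattern(unsigned long a, unsigned long b)
  {
  	bool vm_fail;

  	asm("cmp %[b], %[a]\n\t"
  	    "setbe %[fail]"
  	    : [fail]"=q"(vm_fail)
  	    : [a]"r"(a), [b]"r"(b)
  	    : "cc");
  	return vm_fail;
  }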

Comments

Jim Mattson Jan. 24, 2019, 11:18 p.m. UTC | #1
On Thu, Jan 24, 2019 at 9:59 AM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> [...]
> -               /* Set vmx->fail accordingly */
> -               "setbe %c[fail](%% " _ASM_CX")\n\t"
> -             : ASM_CALL_CONSTRAINT
> +               "setbe %%dl\n\t"
> +             : ASM_CALL_CONSTRAINT, "=dl"(vm_fail)

Why hardcode %dl? Can't you use an '=q' constraint and let the compiler choose?
Sean Christopherson Jan. 24, 2019, 11:25 p.m. UTC | #2
On Thu, Jan 24, 2019 at 03:18:58PM -0800, Jim Mattson wrote:
> On Thu, Jan 24, 2019 at 9:59 AM Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
> >
> > [...]
> 
> Why hardcode %dl? Can't you use an '=q' constraint and let the compiler choose?

I got lazy because manually doing setbe goes away in the next patch.
I'll fix this in v3.
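
For reference, the '=q' variant Jim suggests would look something like
this (a sketch against this patch, not the actual v3 code); '=q' lets
the compiler pick any byte-addressable register for the SETBE
destination instead of hardcoding %dl:

		"setbe %[vm_fail]\n\t"
	      : ASM_CALL_CONSTRAINT, [vm_fail]"=q"(vm_fail)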

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b92291b8e54b..495edb36b525 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2717,6 +2717,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
+	bool vm_fail;
 
 	if (!nested_early_check)
 		return 0;
@@ -2762,14 +2763,18 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 		/* Check if vmlaunch or vmresume is needed */
 		"cmpb $0, %c[launched](%% " _ASM_CX")\n\t"
 
+		/*
+		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
+		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
+		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
+		 * result of VM-Enter is captured via SETBE to vm_fail.
+		 */
 		"call vmx_vmenter\n\t"
 
-		/* Set vmx->fail accordingly */
-		"setbe %c[fail](%% " _ASM_CX")\n\t"
-	      : ASM_CALL_CONSTRAINT
+		"setbe %%dl\n\t"
+	      : ASM_CALL_CONSTRAINT, "=dl"(vm_fail)
 	      : "c"(vmx), "d"((unsigned long)HOST_RSP),
 		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
-		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
 		[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
 		[wordsize]"i"(sizeof(ulong))
 	      : "cc", "memory"
@@ -2782,10 +2787,9 @@  static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	if (vmx->msr_autoload.guest.nr)
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 
-	if (vmx->fail) {
+	if (vm_fail) {
 		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
 			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		vmx->fail = 0;
 		return 1;
 	}