
[04/10] KVM: VMX: Fold vpid_sync_vcpu_{single,global}() into vpid_sync_context()

Message ID 20200220204356.8837-5-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86: Clean up VMX's TLB flushing code

Commit Message

Sean Christopherson Feb. 20, 2020, 8:43 p.m. UTC
Fold vpid_sync_vcpu_global() and vpid_sync_vcpu_single() into their sole
caller.  KVM should always prefer the single variant, i.e. the only
reason to use the global variant is if the CPU doesn't support
invalidating a single VPID, which is the entire purpose of wrapping the
calls with vpid_sync_context().

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/vmx/ops.h | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)

Comments

Vitaly Kuznetsov Feb. 21, 2020, 1:39 p.m. UTC | #1
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> Fold vpid_sync_vcpu_global() and vpid_sync_vcpu_single() into their sole
> caller.  KVM should always prefer the single variant, i.e. the only
> reason to use the global variant is if the CPU doesn't support
> invalidating a single VPID, which is the entire purpose of wrapping the
> calls with vpid_sync_context().
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kvm/vmx/ops.h | 16 ++--------------
>  1 file changed, 2 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
> index 612df1bdb26b..eb6adc77a55d 100644
> --- a/arch/x86/kvm/vmx/ops.h
> +++ b/arch/x86/kvm/vmx/ops.h
> @@ -253,29 +253,17 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
>  	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
>  }
>  
> -static inline void vpid_sync_vcpu_single(int vpid)
> +static inline void vpid_sync_context(int vpid)
>  {
>  	if (vpid == 0)
>  		return;
>  
>  	if (cpu_has_vmx_invvpid_single())
>  		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
> -}
> -
> -static inline void vpid_sync_vcpu_global(void)
> -{
> -	if (cpu_has_vmx_invvpid_global())
> +	else
>  		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
>  }
>  
> -static inline void vpid_sync_context(int vpid)
> -{
> -	if (cpu_has_vmx_invvpid_single())
> -		vpid_sync_vcpu_single(vpid);
> -	else
> -		vpid_sync_vcpu_global();
> -}
> -
>  static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
>  {
>  	if (vpid == 0)

In the original code it's only vpid_sync_vcpu_single() which has 'vpid
== 0' check, vpid_sync_vcpu_global() doesn't have it. So in the
hypothetical situation when cpu_has_vmx_invvpid_single() is false AND
we've e.g. exhausted our VPID space and allocate_vpid() returned zero,
the new code just won't do anything while the old one would've done
__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0), right?
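
To make the corner case concrete, here is a minimal stand-alone sketch (plain C with the VMX capability checks and the INVVPID call stubbed out as printouts; none of this is kernel code) contrasting the pre-patch and post-patch shapes when single-context INVVPID is unsupported and the vCPU ended up with VPID 0:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real capability checks and INVVPID wrapper. */
static bool cpu_has_vmx_invvpid_single(void) { return false; }  /* assume unsupported */
static bool cpu_has_vmx_invvpid_global(void) { return true; }

static void invvpid(const char *extent, int vpid)
{
        printf("INVVPID %s (vpid=%d)\n", extent, vpid);
}

/* Pre-patch shape: the vpid == 0 check only guards the single-context path. */
static void vpid_sync_context_old(int vpid)
{
        if (cpu_has_vmx_invvpid_single()) {
                if (vpid != 0)
                        invvpid("single-context", vpid);
        } else if (cpu_has_vmx_invvpid_global()) {
                invvpid("all-context", 0);
        }
}

/* Post-patch shape: vpid == 0 now bails out before either flavor runs. */
static void vpid_sync_context_new(int vpid)
{
        if (vpid == 0)
                return;

        if (cpu_has_vmx_invvpid_single())
                invvpid("single-context", vpid);
        else
                invvpid("all-context", 0);
}

int main(void)
{
        vpid_sync_context_old(0);       /* prints an all-context flush */
        vpid_sync_context_new(0);       /* prints nothing */
        return 0;
}

With the single-context capability stubbed to false, the old shape still issues the all-context flush for vpid == 0 while the folded version returns without flushing, which is exactly the functional change being pointed out here.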
Sean Christopherson Feb. 21, 2020, 3:32 p.m. UTC | #2
On Fri, Feb 21, 2020 at 02:39:51PM +0100, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@intel.com> writes:
> 
> > Fold vpid_sync_vcpu_global() and vpid_sync_vcpu_single() into their sole
> > caller.  KVM should always prefer the single variant, i.e. the only
> > reason to use the global variant is if the CPU doesn't support
> > invalidating a single VPID, which is the entire purpose of wrapping the
> > calls with vpid_sync_context().
> >
> > No functional change intended.
> >
> > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > ---
> >  arch/x86/kvm/vmx/ops.h | 16 ++--------------
> >  1 file changed, 2 insertions(+), 14 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
> > index 612df1bdb26b..eb6adc77a55d 100644
> > --- a/arch/x86/kvm/vmx/ops.h
> > +++ b/arch/x86/kvm/vmx/ops.h
> > @@ -253,29 +253,17 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
> >  	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
> >  }
> >  
> > -static inline void vpid_sync_vcpu_single(int vpid)
> > +static inline void vpid_sync_context(int vpid)
> >  {
> >  	if (vpid == 0)
> >  		return;
> >  
> >  	if (cpu_has_vmx_invvpid_single())
> >  		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
> > -}
> > -
> > -static inline void vpid_sync_vcpu_global(void)
> > -{
> > -	if (cpu_has_vmx_invvpid_global())
> > +	else
> >  		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
> >  }
> >  
> > -static inline void vpid_sync_context(int vpid)
> > -{
> > -	if (cpu_has_vmx_invvpid_single())
> > -		vpid_sync_vcpu_single(vpid);
> > -	else
> > -		vpid_sync_vcpu_global();
> > -}
> > -
> >  static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
> >  {
> >  	if (vpid == 0)
> 
> In the original code it's only vpid_sync_vcpu_single() which has 'vpid
> == 0' check, vpid_sync_vcpu_global() doesn't have it. So in the
> hypothetical situation when cpu_has_vmx_invvpid_single() is false AND
> we've e.g. exhausted our VPID space and allocate_vpid() returned zero,
> the new code just won't do anything while the old one would've done
> __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0), right?

Ah rats.  I lost track of that functional change between making the commit
and writing the changelog.

I'll spin a v2 to rewrite the changelog, and maybe add the "vpid == 0"
check in a separate patch.
Paolo Bonzini Feb. 21, 2020, 5:28 p.m. UTC | #3
On 21/02/20 16:32, Sean Christopherson wrote:
>> In the original code it's only vpid_sync_vcpu_single() which has 'vpid
>> == 0' check, vpid_sync_vcpu_global() doesn't have it. So in the
>> hypothetical situation when cpu_has_vmx_invvpid_single() is false AND
>> we've e.g. exhausted our VPID space and allocate_vpid() returned zero,
>> the new code just won't do anything while the old one would've done
>> __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0), right?
> Ah rats.  I lost track of that functional change between making the commit
> and writing the changelog.
> 
> I'll spin a v2 to rewrite the changelog, and maybe add the "vpid == 0"
> check in a separate patch.
> 

What about this:

diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
index eb6adc77a55d..2ab88984b22f 100644
--- a/arch/x86/kvm/vmx/ops.h
+++ b/arch/x86/kvm/vmx/ops.h
@@ -255,13 +255,10 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
 
 static inline void vpid_sync_context(int vpid)
 {
-	if (vpid == 0)
-		return;
-
-	if (cpu_has_vmx_invvpid_single())
-		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
-	else
+	if (!cpu_has_vmx_invvpid_single())
 		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
+	else if (vpid != 0)
+		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
 }
 
 static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
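
For completeness, here is a similar stand-alone sketch (plain C, stubs only, purely illustrative) that walks Paolo's reordered vpid_sync_context() through the four combinations of single-context INVVPID support and VPID value; it restores the pre-series behavior, modulo the cpu_has_vmx_invvpid_global() check that the fold already drops:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors Paolo's proposed ordering, with the capability check passed in as a
 * parameter and the INVVPID calls reduced to printouts. */
static void sync(bool has_invvpid_single, int vpid)
{
        if (!has_invvpid_single)
                printf("all-context flush    (single=%d, vpid=%d)\n", has_invvpid_single, vpid);
        else if (vpid != 0)
                printf("single-context flush (single=%d, vpid=%d)\n", has_invvpid_single, vpid);
        else
                printf("no flush             (single=%d, vpid=%d)\n", has_invvpid_single, vpid);
}

int main(void)
{
        /* Expected output, matching the original vpid_sync_vcpu_{single,global}() split:
         *   no single support, vpid == 0  ->  all-context flush
         *   no single support, vpid != 0  ->  all-context flush
         *      single support, vpid == 0  ->  no flush
         *      single support, vpid != 0  ->  single-context flush
         */
        sync(false, 0);
        sync(false, 5);
        sync(true, 0);
        sync(true, 5);
        return 0;
}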

Patch

diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
index 612df1bdb26b..eb6adc77a55d 100644
--- a/arch/x86/kvm/vmx/ops.h
+++ b/arch/x86/kvm/vmx/ops.h
@@ -253,29 +253,17 @@ static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
 	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
 }
 
-static inline void vpid_sync_vcpu_single(int vpid)
+static inline void vpid_sync_context(int vpid)
 {
 	if (vpid == 0)
 		return;
 
 	if (cpu_has_vmx_invvpid_single())
 		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
-}
-
-static inline void vpid_sync_vcpu_global(void)
-{
-	if (cpu_has_vmx_invvpid_global())
+	else
 		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
 }
 
-static inline void vpid_sync_context(int vpid)
-{
-	if (cpu_has_vmx_invvpid_single())
-		vpid_sync_vcpu_single(vpid);
-	else
-		vpid_sync_vcpu_global();
-}
-
 static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
 {
 	if (vpid == 0)