
kvm: svm: Ensure an IBPB on all affected CPUs when freeing a vmcb

Message ID 20180522165420.22196-1-jmattson@google.com (mailing list archive)
State New, archived

Commit Message

Jim Mattson May 22, 2018, 4:54 p.m. UTC
Previously, we only called indirect_branch_prediction_barrier on the
logical CPU that freed a vmcb. This function should be called on all
logical CPUs that last loaded the vmcb in question.

Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
Reported-by: Neel Natu <neelnatu@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/svm.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)
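
For context: clearing the per-CPU current_vmcb pointer is sufficient because
svm_vcpu_load() already performs an IBPB whenever it is asked to load a vmcb
other than the one the CPU loaded last. A simplified sketch of that existing
check (introduced by the commit named in the Fixes: tag; the rest of the load
path is omitted):

	static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);
		struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

		/* ... other per-CPU load work omitted ... */

		/*
		 * If this CPU last ran a different vmcb (or its tracking slot
		 * was cleared because that vmcb was freed), flush indirect
		 * branch predictions before running the new guest.
		 */
		if (sd->current_vmcb != svm->vmcb) {
			sd->current_vmcb = svm->vmcb;
			indirect_branch_prediction_barrier();
		}
	}

With this patch, freeing a vmcb clears every matching current_vmcb slot, so
the next svm_vcpu_load() on any CPU that last ran the freed vmcb takes the
branch above and issues the barrier.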

Comments

Konrad Rzeszutek Wilk May 30, 2018, 4:24 p.m. UTC | #1
On Tue, May 22, 2018 at 09:54:20AM -0700, Jim Mattson wrote:
> Previously, we only called indirect_branch_prediction_barrier on the
> logical CPU that freed a vmcb. This function should be called on all
> logical CPUs that last loaded the vmcb in question.
> 
> Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
> Reported-by: Neel Natu <neelnatu@google.com>
> Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Thank you!
Jim Mattson Nov. 26, 2018, 9:05 p.m. UTC | #2
On Wed, May 30, 2018 at 9:33 AM Konrad Rzeszutek Wilk
<konrad.wilk@oracle.com> wrote:
>
> On Tue, May 22, 2018 at 09:54:20AM -0700, Jim Mattson wrote:
> > Previously, we only called indirect_branch_prediction_barrier on the
> > logical CPU that freed a vmcb. This function should be called on all
> > logical CPUs that last loaded the vmcb in question.
> >
> > Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
> > Reported-by: Neel Natu <neelnatu@google.com>
> > Signed-off-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>
> Thank you!

Ping.
Paolo Bonzini Nov. 27, 2018, 11:12 a.m. UTC | #3
On 26/11/18 22:05, Jim Mattson wrote:
> On Wed, May 30, 2018 at 9:33 AM Konrad Rzeszutek Wilk
> <konrad.wilk@oracle.com> wrote:
>>
>> On Tue, May 22, 2018 at 09:54:20AM -0700, Jim Mattson wrote:
>>> Previously, we only called indirect_branch_prediction_barrier on the
>>> logical CPU that freed a vmcb. This function should be called on all
>>> logical CPUs that last loaded the vmcb in question.
>>>
>>> Fixes: 15d45071523d ("KVM/x86: Add IBPB support")
>>> Reported-by: Neel Natu <neelnatu@google.com>
>>> Signed-off-by: Jim Mattson <jmattson@google.com>
>> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>>
>> Thank you!
> 
> Ping.
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 220e5a89465a..ffa27f75e323 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2172,21 +2172,31 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+	int i;
+
+	for_each_online_cpu(i)
+		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	/*
+	 * The vmcb page can be recycled, causing a false negative in
+	 * svm_vcpu_load(). So, ensure that no logical CPU has this
+	 * vmcb page recorded as its current vmcb.
+	 */
+	svm_clear_current_vmcb(svm->vmcb);
+
 	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
-	/*
-	 * The vmcb page can be recycled, causing a false negative in
-	 * svm_vcpu_load(). So do a full IBPB now.
-	 */
-	indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
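
One design note on svm_clear_current_vmcb(): it uses cmpxchg() rather than a
plain store, so a CPU's current_vmcb slot is reset only if it still points at
the vmcb being freed. CPUs that have since loaded a different vmcb keep their
tracking intact and are not forced into a spurious IBPB on their next
svm_vcpu_load().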