
KVM: nSVM: Optimize L12 to L2 vmcb.save copies

Message ID 20210301200844.2000-1-cavery@redhat.com (mailing list archive)
State New, archived
Series KVM: nSVM: Optimize L12 to L2 vmcb.save copies

Commit Message

Cathy Avery March 1, 2021, 8:08 p.m. UTC
Use the vmcb12 control area's clean field to determine which vmcb12.save
registers were marked dirty in order to minimize register copies
when switching from L1 to L2. Those vmcb12 registers marked as dirty need
to be copied to L2's vmcb as they will be used to update the vmcb
state cache for the L2 VMRUN.  In the case where we have a different
vmcb12 from the last L2 VMRUN, all vmcb12.save registers must be
copied over to L2's save area.
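
The per-group pattern, taking the segment registers as an example (a sketch
of the hunk applied below, using the vmcb_is_dirty() helper this patch
introduces):

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		/* The vmcb12 segment state changed, or this is a different
		 * vmcb12 than the last L2 VMRUN used: refresh vmcb02 and
		 * flag the group dirty for hardware.
		 */
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}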

Tested:
kvm-unit-tests
kvm selftests
Fedora L1 and L2 guests

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Cathy Avery <cavery@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 43 ++++++++++++++++++++++++++-------------
 arch/x86/kvm/svm/svm.c    |  1 +
 arch/x86/kvm/svm/svm.h    |  6 ++++++
 3 files changed, 36 insertions(+), 14 deletions(-)

Comments

Sean Christopherson March 2, 2021, 12:59 a.m. UTC | #1
On Mon, Mar 01, 2021, Cathy Avery wrote:
>  	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
>  	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
>  	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
>  	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

Why not utilize VMCB_CR?

> -	svm->vcpu.arch.cr2 = vmcb12->save.cr2;
> +	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;

Same question for VMCB_CR2.

Also, isn't writing svm->vmcb->save.cr2 unnecessary since svm_vcpu_run()
unconditionally writes it?

Alternatively, it shouldn't be too much work to add proper dirty tracking for
CR2.  VMX has to write the real CR2 every time because there's no VMCS field,
but I assume SVM can avoid the write and dirty update on the majority of VMRUNs.
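
Such CR2 dirty tracking might look something like the sketch below in
svm_vcpu_run(), assuming the existing VMCB_CR2 clean bit and the
vmcb_mark_dirty() helper (an illustration of the suggestion above, not part
of this patch):

	/* Skip the VMCB write, and thus leave the CR2 clean bit set, when
	 * the guest's CR2 hasn't changed since the last VMRUN.
	 */
	if (unlikely(svm->vmcb->save.cr2 != vcpu->arch.cr2)) {
		svm->vmcb->save.cr2 = vcpu->arch.cr2;
		vmcb_mark_dirty(svm->vmcb, VMCB_CR2);
	}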

> +
>  	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
>  	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
>  	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
>  
>  	/* In case we don't even reach vcpu_run, the fields are not updated */
> -	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2;
>  	svm->vmcb->save.rax = vmcb12->save.rax;
>  	svm->vmcb->save.rsp = vmcb12->save.rsp;
>  	svm->vmcb->save.rip = vmcb12->save.rip;
>  
> -	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
> -	svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
> -	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
> +	/* These bits will be set properly on the first execution when new_vmcb12 is true */
> +	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
> +		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
> +		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
> +		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
> +	}
>  }
>  
>  static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 54610270f66a..9761a7ca8100 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1232,6 +1232,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
>  	svm->asid = 0;
>  
>  	svm->nested.vmcb12_gpa = 0;
> +	svm->nested.last_vmcb12_gpa = 0;

We should use INVALID_PAGE; '0' is a legal physical address and could
theoretically yield a false negative on the "new_vmcb12" check.
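
A sketch of the suggested initialization in init_vmcb() (INVALID_PAGE is an
all-ones value, so it can never match a real vmcb12 GPA):

	/* No vmcb12 seen yet; any legal GPA, including 0, compares unequal. */
	svm->nested.last_vmcb12_gpa = INVALID_PAGE;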

>  	vcpu->arch.hflags = 0;
>  
>  	if (!kvm_pause_in_guest(vcpu->kvm)) {
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index fbbb26dd0f73..911868d4584c 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -93,6 +93,7 @@ struct svm_nested_state {
>  	u64 hsave_msr;
>  	u64 vm_cr_msr;
>  	u64 vmcb12_gpa;
> +	u64 last_vmcb12_gpa;
>  
>  	/* These are the merged vectors */
>  	u32 *msrpm;
> @@ -247,6 +248,11 @@ static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
>  	vmcb->control.clean &= ~(1 << bit);
>  }
>  
> +static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
> +{
> +	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
> +}
> +
>  static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
>  {
>  	return container_of(vcpu, struct vcpu_svm, vcpu);
> -- 
> 2.26.2
>
Cathy Avery March 2, 2021, 12:56 p.m. UTC | #2
On 3/1/21 7:59 PM, Sean Christopherson wrote:
> On Mon, Mar 01, 2021, Cathy Avery wrote:
>>   	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
>>   	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
>>   	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
>>   	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
> Why not utilize VMCB_CR?
I was going to tackle CR in a follow-up patch. I should have mentioned
that, but it makes sense to go ahead and do it now.
>
>> -	svm->vcpu.arch.cr2 = vmcb12->save.cr2;
>> +	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
> Same question for VMCB_CR2.
>
> Also, isn't writing svm->vmcb->save.cr2 unnecessary since svm_vcpu_run()
> unconditionally writes it?
>
> Alternatively, it shouldn't be too much work to add proper dirty tracking for
> CR2.  VMX has to write the real CR2 every time because there's no VMCS field,
> but I assume SVM can avoid the write and dirty update on the majority of VMRUNs.

I'll take a look at CR2 as well.

Thanks for the feedback,

Cathy

>
>> +
>>   	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
>>   	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
>>   	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
>>   
>>   	/* In case we don't even reach vcpu_run, the fields are not updated */
>> -	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2;
>>   	svm->vmcb->save.rax = vmcb12->save.rax;
>>   	svm->vmcb->save.rsp = vmcb12->save.rsp;
>>   	svm->vmcb->save.rip = vmcb12->save.rip;
>>   
>> -	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
>> -	svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
>> -	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
>> +	/* These bits will be set properly on the first execution when new_vmcb12 is true */
>> +	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
>> +		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
>> +		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
>> +		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
>> +	}
>>   }
>>   
>>   static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>> index 54610270f66a..9761a7ca8100 100644
>> --- a/arch/x86/kvm/svm/svm.c
>> +++ b/arch/x86/kvm/svm/svm.c
>> @@ -1232,6 +1232,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
>>   	svm->asid = 0;
>>   
>>   	svm->nested.vmcb12_gpa = 0;
>> +	svm->nested.last_vmcb12_gpa = 0;
> We should use INVALID_PAGE; '0' is a legal physical address and could
> theoretically yield a false negative on the "new_vmcb12" check.
>
>>   	vcpu->arch.hflags = 0;
>>   
>>   	if (!kvm_pause_in_guest(vcpu->kvm)) {
>> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
>> index fbbb26dd0f73..911868d4584c 100644
>> --- a/arch/x86/kvm/svm/svm.h
>> +++ b/arch/x86/kvm/svm/svm.h
>> @@ -93,6 +93,7 @@ struct svm_nested_state {
>>   	u64 hsave_msr;
>>   	u64 vm_cr_msr;
>>   	u64 vmcb12_gpa;
>> +	u64 last_vmcb12_gpa;
>>   
>>   	/* These are the merged vectors */
>>   	u32 *msrpm;
>> @@ -247,6 +248,11 @@ static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
>>   	vmcb->control.clean &= ~(1 << bit);
>>   }
>>   
>> +static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
>> +{
>> +	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
>> +}
>> +
>>   static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
>>   {
>>   	return container_of(vcpu, struct vcpu_svm, vcpu);
>> -- 
>> 2.26.2
>>
Paolo Bonzini March 2, 2021, 2:18 p.m. UTC | #3
On 02/03/21 13:56, Cathy Avery wrote:
> On 3/1/21 7:59 PM, Sean Christopherson wrote:
>> On Mon, Mar 01, 2021, Cathy Avery wrote:
>>>  	svm->nested.vmcb12_gpa = 0;
>>> +	svm->nested.last_vmcb12_gpa = 0;


This should not be 0 to avoid a false match.  "-1" should be okay.

>>>       kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
>>>       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
>>>       svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
>>>       svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
>> Why not utilize VMCB_CR?
> I was going to tackle CR in a follow-up patch. I should have mentioned
> that, but it makes sense to go ahead and do it now.

There is some trickiness.  For example, I would first prefer to move the 
checks on svm->vmcb->save.cr0 == vcpu->arch.cr0 ("hcr0 == cr0" in 
svm_set_cr0) to recalc_intercepts.

For cr4, instead, we need to go through kvm_update_cpuid_runtime in case
the host CR4 is not equal to the guest CR4 (for which we have a testcase
in svm.flat already, I think).
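
Shape-wise, a VMCB_CR-gated version would mirror the other groups in this
patch, roughly as sketched below (the CRx clean bit covers CR0, CR3, CR4
and EFER); the trickiness above is why it isn't attempted here:

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_CR))) {
		svm_set_efer(&svm->vcpu, vmcb12->save.efer);
		svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
		svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	}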

>>> -    svm->vcpu.arch.cr2 = vmcb12->save.cr2;
>>> +    svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
>> Same question for VMCB_CR2.
>>
>> Also, isn't writing svm->vmcb->save.cr2 unnecessary since svm_vcpu_run()
>> unconditionally writes it?
>>
>> Alternatively, it shouldn't be too much work to add proper dirty tracking
>> for CR2.  VMX has to write the real CR2 every time because there's no VMCS
>> field, but I assume SVM can avoid the write and dirty update on the
>> majority of VMRUNs.
> 
> I'll take a look at CR2 as well.

That's a separate patch, to some extent unrelated to nesting.  Feel free 
to look at it, but for now we should apply this part with only the 
svm->vmcb->save.cr2 assignment removed.  Please send a v2, thanks!
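
That is, v2 would keep the original form of the CR2 assignment in
nested_vmcb02_prepare_save():

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;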

Paolo

> Thanks for the feedback,
> 
> Cathy
> 
>>
>>> +
>>>       kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
>>>       kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
>>>       kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
>>>       /* In case we don't even reach vcpu_run, the fields are not updated */
>>> -    svm->vmcb->save.cr2 = svm->vcpu.arch.cr2;
>>>       svm->vmcb->save.rax = vmcb12->save.rax;
>>>       svm->vmcb->save.rsp = vmcb12->save.rsp;
>>>       svm->vmcb->save.rip = vmcb12->save.rip;
>>> -    svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
>>> -    svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
>>> -    vmcb_mark_dirty(svm->vmcb, VMCB_DR);
>>> +    /* These bits will be set properly on the first execution when new_vmcb12 is true */
>>> +    if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
>>> +        svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
>>> +        svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
>>> +        vmcb_mark_dirty(svm->vmcb, VMCB_DR);
>>> +    }
>>>   }
>>>   static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
>>> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
>>> index 54610270f66a..9761a7ca8100 100644
>>> --- a/arch/x86/kvm/svm/svm.c
>>> +++ b/arch/x86/kvm/svm/svm.c
>>> @@ -1232,6 +1232,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
>>>       svm->asid = 0;
>>>       svm->nested.vmcb12_gpa = 0;
>>> +    svm->nested.last_vmcb12_gpa = 0;
>> We should use INVALID_PAGE; '0' is a legal physical address and could
>> theoretically yield a false negative on the "new_vmcb12" check.
>>
>>>       vcpu->arch.hflags = 0;
>>>       if (!kvm_pause_in_guest(vcpu->kvm)) {
>>> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
>>> index fbbb26dd0f73..911868d4584c 100644
>>> --- a/arch/x86/kvm/svm/svm.h
>>> +++ b/arch/x86/kvm/svm/svm.h
>>> @@ -93,6 +93,7 @@ struct svm_nested_state {
>>>       u64 hsave_msr;
>>>       u64 vm_cr_msr;
>>>       u64 vmcb12_gpa;
>>> +    u64 last_vmcb12_gpa;
>>>       /* These are the merged vectors */
>>>       u32 *msrpm;
>>> @@ -247,6 +248,11 @@ static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
>>>       vmcb->control.clean &= ~(1 << bit);
>>>   }
>>> +static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
>>> +{
>>> +        return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
>>> +}
>>> +
>>>   static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
>>>   {
>>>       return container_of(vcpu, struct vcpu_svm, vcpu);
>>> -- 
>>> 2.26.2
>>>
>
Paolo Bonzini March 2, 2021, 2:20 p.m. UTC | #4
On 02/03/21 01:59, Sean Christopherson wrote:
>> +	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
> Same question for VMCB_CR2.

Besides the question of how much AMD processors actually use the clean 
bits (a quick test suggests "not much"), in this specific case I suspect 
that the check would be more expensive than the savings from a memory load.

Paolo

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 90a1704b5752..c1d5944ee473 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -422,39 +422,54 @@ void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
 
 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
+	bool new_vmcb12 = false;
+
 	nested_vmcb02_compute_g_pat(svm);
 
 	/* Load the nested guest state */
-	svm->vmcb->save.es = vmcb12->save.es;
-	svm->vmcb->save.cs = vmcb12->save.cs;
-	svm->vmcb->save.ss = vmcb12->save.ss;
-	svm->vmcb->save.ds = vmcb12->save.ds;
-	svm->vmcb->save.cpl = vmcb12->save.cpl;
-	vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
 
-	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
-	svm->vmcb->save.idtr = vmcb12->save.idtr;
-	vmcb_mark_dirty(svm->vmcb, VMCB_DT);
+	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
+		new_vmcb12 = true;
+		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
+	}
+
+	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
+		svm->vmcb->save.es = vmcb12->save.es;
+		svm->vmcb->save.cs = vmcb12->save.cs;
+		svm->vmcb->save.ss = vmcb12->save.ss;
+		svm->vmcb->save.ds = vmcb12->save.ds;
+		svm->vmcb->save.cpl = vmcb12->save.cpl;
+		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
+	}
+
+	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
+		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
+		svm->vmcb->save.idtr = vmcb12->save.idtr;
+		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
+	}
 
 	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
 	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
 	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
 	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
 
-	svm->vcpu.arch.cr2 = vmcb12->save.cr2;
+	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
+
 	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
 	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
 	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
 
 	/* In case we don't even reach vcpu_run, the fields are not updated */
-	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2;
 	svm->vmcb->save.rax = vmcb12->save.rax;
 	svm->vmcb->save.rsp = vmcb12->save.rsp;
 	svm->vmcb->save.rip = vmcb12->save.rip;
 
-	svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
-	svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
-	vmcb_mark_dirty(svm->vmcb, VMCB_DR);
+	/* These bits will be set properly on the first execution when new_vmcb12 is true */
+	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
+		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
+		svm->vcpu.arch.dr6  = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
+		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
+	}
 }
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 54610270f66a..9761a7ca8100 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1232,6 +1232,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 	svm->asid = 0;
 
 	svm->nested.vmcb12_gpa = 0;
+	svm->nested.last_vmcb12_gpa = 0;
 	vcpu->arch.hflags = 0;
 
 	if (!kvm_pause_in_guest(vcpu->kvm)) {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fbbb26dd0f73..911868d4584c 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -93,6 +93,7 @@ struct svm_nested_state {
 	u64 hsave_msr;
 	u64 vm_cr_msr;
 	u64 vmcb12_gpa;
+	u64 last_vmcb12_gpa;
 
 	/* These are the merged vectors */
 	u32 *msrpm;
@@ -247,6 +248,11 @@ static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
 	vmcb->control.clean &= ~(1 << bit);
 }
 
+static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
+{
+	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);