
[10/30] KVM: nSVM: extract preparation of VMCB for nested run

Message ID 20200529153934.11694-11-pbonzini@redhat.com (mailing list archive)
State New, archived
Series KVM: nSVM: event fixes and migration support

Commit Message

Paolo Bonzini May 29, 2020, 3:39 p.m. UTC
Split out filling svm->vmcb.save and svm->vmcb.control before VMRUN.
Only the latter will be useful when restoring nested SVM state.

This patch introduces no semantic change, so the MMU setup is still
done in nested_prepare_vmcb_save.  The next patch will clean things up.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 40 +++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)
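
A note on why only the control part matters for restore: on migration,
the guest register state (the save area) arrives through the ordinary
register ioctls, so a nested-state restore path only needs to rebuild
svm->vmcb.control.  The sketch below is illustrative only -- the
function name and error handling are hypothetical, not the handler
added later in this series -- and it assumes it lives in
arch/x86/kvm/svm/nested.c, next to the static helpers it calls:

	/*
	 * Illustrative sketch only: how a KVM_SET_NESTED_STATE handler
	 * could reuse the helpers factored out by this patch.  Types are
	 * from arch/x86/kvm/svm/svm.h; the function name is hypothetical.
	 */
	static int sketch_restore_nested_state(struct vcpu_svm *svm, u64 vmcb_gpa,
					       struct vmcb *vmcb12)
	{
		svm->nested.vmcb = vmcb_gpa;

		/* Copy the vmcb12 control fields into svm->nested. */
		load_nested_vmcb_control(svm, &vmcb12->control);

		/*
		 * The save-area registers come in via KVM_SET_REGS and
		 * friends, so only the control part of svm->vmcb has to
		 * be rebuilt here -- the reason for this split.
		 */
		nested_prepare_vmcb_control(svm, vmcb12);

		return 0;
	}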

Comments

Krish Sadhukhan May 29, 2020, 6:27 p.m. UTC | #1
On 5/29/20 8:39 AM, Paolo Bonzini wrote:
> Split out filling svm->vmcb.save and svm->vmcb.control before VMRUN.
> Only the latter will be useful when restoring nested SVM state.
>
> This patch introduces no semantic change, so the MMU setup is still
> done in nested_prepare_vmcb_save.  The next patch will clean things up.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>   arch/x86/kvm/svm/nested.c | 40 +++++++++++++++++++++++----------------
>   1 file changed, 24 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index fc0c6d1678eb..73be7af79453 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -245,21 +245,8 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
>   	svm->vcpu.arch.tsc_offset += control->tsc_offset;
>   }
>   
> -void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
> -			  struct vmcb *nested_vmcb)
> +static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)


Not a big deal, but I feel it helps readability a lot if we keep the
names symmetric. This one could be named prepare_nested_vmcb_save to
match the load_nested_vmcb_control that you created in the previous
patch, or load_nested_vmcb_control could be renamed to
nested_load_vmcb_control to match the name here.

[ rest of quoted patch trimmed; the full patch appears below ]
Paolo Bonzini May 29, 2020, 7:02 p.m. UTC | #2
On 29/05/20 20:27, Krish Sadhukhan wrote:
>>
>> +static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
> 
> 
> Not a big deal, but I feel it helps readability a lot if we keep the
> names symmetric. This one could be named prepare_nested_vmcb_save to
> match the load_nested_vmcb_control that you created in the previous
> patch, or load_nested_vmcb_control could be renamed to
> nested_load_vmcb_control to match the name here.

This is actually intended: while load_nested_vmcb_control loads the
members of nested_vmcb->control into svm->nested, the two functions in
this patch prepare svm->vmcb.  A couple of patches later,
nested_prepare_vmcb_control will not use nested_vmcb anymore.

I could use nested_load_nested_vmcb_control, but that is just too ugly!
Instead, the best thing to do would be to use the vmcb01/vmcb02/vmcb12
names as in nVMX, in which case the functions would become
nested_load_vmcb12_control and nested_prepare_vmcb02_{save,control}.
However, this is a bit hard to do right now because svm->vmcb acts as
both vmcb01 and vmcb02, depending on what is running.

Thanks,

Paolo
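
For illustration, the nVMX-style naming described above would look
roughly like the prototypes below.  These are hypothetical and not part
of this series, precisely because svm->vmcb still doubles as vmcb01 and
vmcb02:

	/* Hypothetical prototypes mirroring the nVMX naming scheme. */
	static void nested_load_vmcb12_control(struct vcpu_svm *svm,
					       struct vmcb_control_area *control);
	static void nested_prepare_vmcb02_save(struct vcpu_svm *svm,
					       struct vmcb *vmcb12);
	static void nested_prepare_vmcb02_control(struct vcpu_svm *svm,
						  struct vmcb *vmcb12);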

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fc0c6d1678eb..73be7af79453 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -245,21 +245,8 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
 	svm->vcpu.arch.tsc_offset += control->tsc_offset;
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-			  struct vmcb *nested_vmcb)
+static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
 {
-	bool evaluate_pending_interrupts =
-		is_intercept(svm, INTERCEPT_VINTR) ||
-		is_intercept(svm, INTERCEPT_IRET);
-
-	svm->nested.vmcb = vmcb_gpa;
-	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
-		svm->vcpu.arch.hflags |= HF_HIF_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
-
-	load_nested_vmcb_control(svm, &nested_vmcb->control);
-
 	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
 		nested_svm_init_mmu_context(&svm->vcpu);
 
@@ -291,7 +278,10 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
 	svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+}
 
+static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
 	svm_flush_tlb(&svm->vcpu);
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -321,6 +311,26 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	 */
 	recalc_intercepts(svm);
 
+	mark_all_dirty(svm->vmcb);
+}
+
+void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+			  struct vmcb *nested_vmcb)
+{
+	bool evaluate_pending_interrupts =
+		is_intercept(svm, INTERCEPT_VINTR) ||
+		is_intercept(svm, INTERCEPT_IRET);
+
+	svm->nested.vmcb = vmcb_gpa;
+	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
+		svm->vcpu.arch.hflags |= HF_HIF_MASK;
+	else
+		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
+	load_nested_vmcb_control(svm, &nested_vmcb->control);
+	nested_prepare_vmcb_save(svm, nested_vmcb);
+	nested_prepare_vmcb_control(svm, nested_vmcb);
+
 	/*
 	 * If L1 had a pending IRQ/NMI before executing VMRUN,
 	 * which wasn't delivered because it was disallowed (e.g.
@@ -336,8 +346,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	enable_gif(svm);
 	if (unlikely(evaluate_pending_interrupts))
 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-	mark_all_dirty(svm->vmcb);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)