[v3,2/7] KVM: x86: nSVM: implement nested LBR virtualization

Message ID 20220301143650.143749-3-mlevitsk@redhat.com (mailing list archive)
State New, archived
Series: nSVM/SVM features

Commit Message

Maxim Levitsky March 1, 2022, 2:36 p.m. UTC
This was tested with a kvm-unit-test that was developed
for this purpose.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 21 +++++++++++++++++++--
 arch/x86/kvm/svm/svm.c    |  8 ++++++++
 arch/x86/kvm/svm/svm.h    |  1 +
 3 files changed, 28 insertions(+), 2 deletions(-)

Comments

Paolo Bonzini March 9, 2022, 1 p.m. UTC | #1
On 3/1/22 15:36, Maxim Levitsky wrote:
> @@ -565,8 +565,19 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
>   		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
>   	}
>   
> -	if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK))
> +	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
> +
> +		/* Copy LBR related registers from vmcb12,
> +		 * but make sure that we only pick LBR enable bit from the guest.
> +		 */
> +
> +		svm_copy_lbrs(vmcb12, svm->vmcb);
> +		svm->vmcb->save.dbgctl &= LBR_CTL_ENABLE_MASK;

This should be checked against DEBUGCTL_RESERVED_BITS instead in 
__nested_vmcb_check_save; remember to add dbgctl to struct 
vmcb_save_area_cached too.

Paolo

> +		svm_update_lbrv(&svm->vcpu);
> +
> +	} else if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
>   		svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
> +	}
>   }
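
For illustration, the check Paolo suggests could look roughly like the
sketch below; the DEBUGCTL_RESERVED_BITS mask and the dbgctl field in
struct vmcb_save_area_cached are assumptions here, not something the
patch already provides:

/*
 * Sketch only: bits 0-5 (LBR, BTF, PB0-PB3) are the DEBUGCTL bits
 * defined on AMD; treat everything else as reserved.
 */
#define DEBUGCTL_RESERVED_BITS	(~0x3fULL)

/* Reject a vmcb12 whose DEBUGCTL sets reserved bits, assuming dbgctl
 * has been cached into svm->nested.save by
 * nested_copy_vmcb_save_to_cache().
 */
static bool nested_vmcb_check_dbgctl(struct vcpu_svm *svm)
{
	return !(svm->nested.save.dbgctl & DEBUGCTL_RESERVED_BITS);
}

__nested_vmcb_check_save() would then call this and fail the nested
VMRUN consistency check (VMEXIT_INVALID) when it returns false.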
Maxim Levitsky March 22, 2022, 4:53 p.m. UTC | #2
On Wed, 2022-03-09 at 14:00 +0100, Paolo Bonzini wrote:
> On 3/1/22 15:36, Maxim Levitsky wrote:
> > @@ -565,8 +565,19 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
> >   		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
> >   	}
> >   
> > -	if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK))
> > +	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
> > +
> > +		/* Copy LBR related registers from vmcb12,
> > +		 * but make sure that we only pick LBR enable bit from the guest.
> > +		 */
> > +
> > +		svm_copy_lbrs(vmcb12, svm->vmcb);
> > +		svm->vmcb->save.dbgctl &= LBR_CTL_ENABLE_MASK;
> 
> This should be checked against DEBUGCTL_RESERVED_BITS instead in 
> __nested_vmcb_check_save; remember to add dbgctl to struct 
> vmcb_save_area_cached too.

Actually, the AMD manual doesn't specify what happens when this field contains reserved bits.

I did the following test and saw that the CPU ignores the reserved bits:


diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fa7366fab3bd6..6a9f41941d9ea 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3875,6 +3875,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
                x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
+       svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
+       svm->vmcb->save.dbgctl = 0xFFFFFFFFFFFFFFFFUL;
+
        svm_vcpu_enter_exit(vcpu);
 
        /*


The VM booted fine and reading DEBUGCTL in the guest returned 0x3f.

It's true that DEBUGCTL has a few more bits that are defined on AMD,
which I zero here, but for practical purposes the only bit that matters
is BTF, and I decided to leave it alone for now, as KVM currently
doesn't emulate it correctly anyway when LBRs are not enabled
(look at svm_set_msr, MSR_IA32_DEBUGCTLMSR).

In theory this bit should be passed through by setting the host's
DEBUGCTL just prior to entry to the guest, for the case when only BTF
is enabled without LBR virtualization. I'll fix this later.
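
A rough sketch of that idea, purely illustrative (the helper name and
the exact hook point are made up; DEBUGCTLMSR_BTF and
update_debugctlmsr() are the existing kernel definitions):

/* Illustrative only: with LBR virtualization disabled, VMRUN does not
 * load DEBUGCTL from the VMCB, so propagate the guest's BTF bit into
 * the host MSR just before entry; the host value would have to be
 * restored on VM exit.
 */
static void svm_load_guest_btf(struct vcpu_svm *svm)
{
	u64 btf = svm->vmcb->save.dbgctl & DEBUGCTLMSR_BTF;

	if (btf)
		update_debugctlmsr(btf);
}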

Best regards,
	Maxim Levitsky

> 
> Paolo
> 
> > +		svm_update_lbrv(&svm->vcpu);
> > +
> > +	} else if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
> >   		svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
> > +	}
> >   }

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d026f89ee94e6..12e856bfce408 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -565,8 +565,19 @@  static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
 	}
 
-	if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK))
+	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+
+		/* Copy LBR related registers from vmcb12,
+		 * but make sure that we only pick LBR enable bit from the guest.
+		 */
+
+		svm_copy_lbrs(vmcb12, svm->vmcb);
+		svm->vmcb->save.dbgctl &= LBR_CTL_ENABLE_MASK;
+		svm_update_lbrv(&svm->vcpu);
+
+	} else if (unlikely(svm->vmcb01.ptr->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
 		svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
+	}
 }
 
 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
@@ -626,6 +637,9 @@  static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
 
 	svm->vmcb->control.virt_ext            = svm->vmcb01.ptr->control.virt_ext &
 						 LBR_CTL_ENABLE_MASK;
+	if (svm->lbrv_enabled)
+		svm->vmcb->control.virt_ext  |=
+			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
 
 	nested_svm_transition_tlb_flush(vcpu);
 
@@ -889,7 +903,10 @@  int nested_svm_vmexit(struct vcpu_svm *svm)
 
 	svm_switch_vmcb(svm, &svm->vmcb01);
 
-	if (unlikely(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
+	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+		svm_copy_lbrs(svm->nested.vmcb02.ptr, vmcb12);
+		svm_update_lbrv(vcpu);
+	} else if (unlikely(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
 		svm_copy_lbrs(svm->nested.vmcb02.ptr, svm->vmcb);
 		svm_update_lbrv(vcpu);
 	}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index aa6c04d15d2a7..8d87f84f93f30 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -888,6 +888,10 @@  void svm_update_lbrv(struct kvm_vcpu *vcpu)
 	bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
 				      LBR_CTL_ENABLE_MASK);
 
+	if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
+		if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
+			enable_lbrv = true;
+
 	if (enable_lbrv == current_enable_lbrv)
 		return;
 
@@ -3998,6 +4002,7 @@  static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 			     guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
 
 	svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
+	svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
 
 	svm_recalc_instruction_intercepts(vcpu, svm);
 
@@ -4748,6 +4753,9 @@  static __init void svm_set_cpu_caps(void)
 		if (tsc_scaling)
 			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
 
+		if (lbrv)
+			kvm_cpu_cap_set(X86_FEATURE_LBRV);
+
 		/* Nested VM can receive #VMEXIT instead of triggering #GP */
 		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
 	}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 2d0487968cba4..86dc1f997e01d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -232,6 +232,7 @@  struct vcpu_svm {
 	/* cached guest cpuid flags for faster access */
 	bool nrips_enabled                : 1;
 	bool tsc_scaling_enabled          : 1;
+	bool lbrv_enabled                 : 1;
 
 	u32 ldr_reg;
 	u32 dfr_reg;