@@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER)
KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD)
KVM_GOVERNED_X86_FEATURE(VGIF)
KVM_GOVERNED_X86_FEATURE(VNMI)
+KVM_GOVERNED_X86_FEATURE(VIBS)
#undef KVM_GOVERNED_X86_FEATURE
#undef KVM_GOVERNED_FEATURE
@@ -616,6 +616,16 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
+
+	if (guest_can_use(vcpu, X86_FEATURE_VIBS) &&
+	    !(vmcb12->control.virt_ext & VIRTUAL_IBS_ENABLE_MASK))
+		vmcb02->control.virt_ext &= ~VIRTUAL_IBS_ENABLE_MASK;
+
+	if (unlikely(guest_can_use(vcpu, X86_FEATURE_VIBS) &&
+		     (svm->nested.ctl.virt_ext & VIRTUAL_IBS_ENABLE_MASK)))
+		svm_copy_ibs(vmcb02, vmcb12);
+	else if (unlikely(vmcb01->control.virt_ext & VIRTUAL_IBS_ENABLE_MASK))
+		svm_copy_ibs(vmcb02, vmcb01);
}
static inline bool is_evtinj_soft(u32 evtinj)
@@ -741,6 +751,13 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
		vmcb02->control.virt_ext |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
+	vmcb02->control.virt_ext |= (vmcb01->control.virt_ext & VIRTUAL_IBS_ENABLE_MASK);
+
+	if (guest_can_use(vcpu, X86_FEATURE_VIBS))
+		vmcb02->control.virt_ext |= (svm->nested.ctl.virt_ext & VIRTUAL_IBS_ENABLE_MASK);
+	else
+		vmcb02->control.virt_ext &= ~VIRTUAL_IBS_ENABLE_MASK;
+
	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
@@ -1083,6 +1100,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
		svm_update_lbrv(vcpu);
	}
+	if (unlikely(guest_can_use(vcpu, X86_FEATURE_VIBS) &&
+		     (svm->nested.ctl.virt_ext & VIRTUAL_IBS_ENABLE_MASK)))
+		svm_copy_ibs(vmcb12, vmcb02);
+	else if (unlikely(vmcb01->control.virt_ext & VIRTUAL_IBS_ENABLE_MASK))
+		svm_copy_ibs(vmcb01, vmcb02);
+
	if (vnmi) {
		if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
			vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
@@ -1084,6 +1084,20 @@ void svm_ibs_msr_interception(struct vcpu_svm *svm, bool intercept)
	set_msr_interception(&svm->vcpu, svm->msrpm, MSR_AMD64_ICIBSEXTDCTL, !intercept, !intercept);
}
+void svm_copy_ibs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
+{
+	to_vmcb->save.ibs_fetch_ctl = from_vmcb->save.ibs_fetch_ctl;
+	to_vmcb->save.ibs_fetch_linear_addr = from_vmcb->save.ibs_fetch_linear_addr;
+	to_vmcb->save.ibs_op_ctl = from_vmcb->save.ibs_op_ctl;
+	to_vmcb->save.ibs_op_rip = from_vmcb->save.ibs_op_rip;
+	to_vmcb->save.ibs_op_data = from_vmcb->save.ibs_op_data;
+	to_vmcb->save.ibs_op_data2 = from_vmcb->save.ibs_op_data2;
+	to_vmcb->save.ibs_op_data3 = from_vmcb->save.ibs_op_data3;
+	to_vmcb->save.ibs_dc_linear_addr = from_vmcb->save.ibs_dc_linear_addr;
+	to_vmcb->save.ibs_br_target = from_vmcb->save.ibs_br_target;
+	to_vmcb->save.ibs_fetch_extd_ctl = from_vmcb->save.ibs_fetch_extd_ctl;
+}
+
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4441,6 +4455,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);
+	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VIBS);

	svm_recalc_instruction_intercepts(vcpu, svm);
@@ -5225,6 +5240,9 @@ static __init void svm_set_cpu_caps(void)
	if (vnmi)
		kvm_cpu_cap_set(X86_FEATURE_VNMI);
+	if (vibs)
+		kvm_cpu_cap_set(X86_FEATURE_VIBS);
+
	/* Nested VM can receive #VMEXIT instead of triggering #GP */
	kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
}
@@ -584,6 +584,7 @@ void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
+void svm_copy_ibs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
To handle the case where IBS is enabled for L1 and L2, IBS MSRs are
copied from vmcb12 to vmcb02 during vmentry and vice-versa during
vmexit. To handle the case where IBS is enabled for L1 but _not_ for
L2, IBS MSRs are copied from vmcb01 to vmcb02 during vmentry and
vice-versa during vmexit.

Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 arch/x86/kvm/governed_features.h |  1 +
 arch/x86/kvm/svm/nested.c        | 23 +++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c           | 18 ++++++++++++++++++
 arch/x86/kvm/svm/svm.h           |  1 +
 4 files changed, 43 insertions(+)
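
As a supplement to the description above, here is a stand-alone sketch of
the copy directions, modeled outside of KVM. It is illustrative only and
makes simplifying assumptions: fake_vmcb, model_copy_ibs(), vmentry_copy()
and vmexit_copy() are names invented for this example, the single
ibs_enabled flag stands in for VIRTUAL_IBS_ENABLE_MASK in virt_ext, and the
governed-feature check is reduced to a plain bool. The authoritative logic
is what the diff adds to nested_vmcb02_prepare_save() and
nested_svm_vmexit().

/* Illustrative model only; not KVM code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_vmcb {
	bool ibs_enabled;		/* stands in for VIRTUAL_IBS_ENABLE_MASK */
	unsigned long ibs_regs[10];	/* stands in for the ten IBS save-area fields */
};

/* Rough analogue of svm_copy_ibs(): copy the IBS register block. */
static void model_copy_ibs(struct fake_vmcb *to, const struct fake_vmcb *from)
{
	memcpy(to->ibs_regs, from->ibs_regs, sizeof(to->ibs_regs));
}

/* vmentry: vmcb02 gets IBS state from vmcb12 (L1 and L2 use IBS) or vmcb01 (only L1 does). */
static void vmentry_copy(struct fake_vmcb *vmcb02, const struct fake_vmcb *vmcb01,
			 const struct fake_vmcb *vmcb12, bool l1_has_vibs)
{
	if (l1_has_vibs && vmcb12->ibs_enabled)
		model_copy_ibs(vmcb02, vmcb12);
	else if (vmcb01->ibs_enabled)
		model_copy_ibs(vmcb02, vmcb01);
}

/* vmexit: the same predicate picks the destination, so the state flows back. */
static void vmexit_copy(struct fake_vmcb *vmcb01, struct fake_vmcb *vmcb12,
			const struct fake_vmcb *vmcb02, bool l1_has_vibs)
{
	if (l1_has_vibs && vmcb12->ibs_enabled)
		model_copy_ibs(vmcb12, vmcb02);
	else if (vmcb01->ibs_enabled)
		model_copy_ibs(vmcb01, vmcb02);
}

int main(void)
{
	/* Case from the description: IBS enabled for L1 but _not_ for L2. */
	struct fake_vmcb vmcb01 = { .ibs_enabled = true, .ibs_regs = { 0x1234 } };
	struct fake_vmcb vmcb12 = { 0 };
	struct fake_vmcb vmcb02 = { 0 };

	vmentry_copy(&vmcb02, &vmcb01, &vmcb12, true);	/* vmcb01 -> vmcb02 */
	vmexit_copy(&vmcb01, &vmcb12, &vmcb02, true);	/* vmcb02 -> vmcb01 */
	printf("vmcb02 ibs_regs[0] = %#lx\n", vmcb02.ibs_regs[0]);
	return 0;
}

Running the modeled "L1 only" case shows the state flowing vmcb01 -> vmcb02
on vmentry and back into vmcb01 on vmexit, mirroring the vmcb12 <-> vmcb02
flow used when both L1 and L2 have IBS enabled.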