Message ID | 20240207172646.3981-16-xin3.li@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | Enable FRED with KVM VMX |
On Thu, Feb 08, 2024 at 01:26:35AM +0800, Xin Li wrote:
>Add FRED related VMCS fields to dump_vmcs() to have it dump FRED context.
>
>Signed-off-by: Xin Li <xin3.li@intel.com>
>Tested-by: Shan Kang <shan.kang@intel.com>
>---
>
>Change since v1:
>* Use kvm_cpu_cap_has() instead of cpu_feature_enabled() (Chao Gao).
>* Dump guest FRED states only if guest has FRED enabled (Nikolay Borisov).
>---
> arch/x86/kvm/vmx/vmx.c | 46 +++++++++++++++++++++++++++++++++++-------
> 1 file changed, 39 insertions(+), 7 deletions(-)
>
>diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>index a484b9ac2400..e3409607122d 100644
>--- a/arch/x86/kvm/vmx/vmx.c
>+++ b/arch/x86/kvm/vmx/vmx.c
>@@ -6392,7 +6392,7 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
> 	struct vcpu_vmx *vmx = to_vmx(vcpu);
> 	u32 vmentry_ctl, vmexit_ctl;
> 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
>-	u64 tertiary_exec_control;
>+	u64 tertiary_exec_control, secondary_vmexit_ctl;
> 	unsigned long cr4;
> 	int efer_slot;
>
>@@ -6403,6 +6403,8 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
>
> 	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
> 	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
>+	secondary_vmexit_ctl = cpu_has_secondary_vmexit_ctrls() ?
>+			       vmcs_read64(SECONDARY_VM_EXIT_CONTROLS) : 0;
> 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
> 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
> 	cr4 = vmcs_readl(GUEST_CR4);
>@@ -6449,6 +6451,19 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
> 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
> 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
> 	vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
>+#ifdef CONFIG_X86_64
>+	if (kvm_is_fred_enabled(vcpu)) {

FRED MSRs are accessible even if CR4.FRED isn't set and #ifdef is ugly, I think
you can simply do:

	if (vmentry_ctrl & VM_ENTRY_LOAD_IA32_FRED)

just like below handling for EFER/PAT etc.

>+		pr_err("FRED guest: config=0x%016llx, stack levels=0x%016llx\n"
>+		       "RSP0=0x%016lx, RSP1=0x%016llx\n"
>+		       "RSP2=0x%016llx, RSP3=0x%016llx\n",
>+		       vmcs_read64(GUEST_IA32_FRED_CONFIG),
>+		       vmcs_read64(GUEST_IA32_FRED_STKLVLS),
>+		       read_msr(MSR_IA32_FRED_RSP0),
>+		       vmcs_read64(GUEST_IA32_FRED_RSP1),
>+		       vmcs_read64(GUEST_IA32_FRED_RSP2),
>+		       vmcs_read64(GUEST_IA32_FRED_RSP3));
>+	}
>+#endif
> 	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
> 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
> 		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
>@@ -6496,6 +6511,19 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
> 	       vmcs_readl(HOST_TR_BASE));
> 	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
> 	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
>+#ifdef CONFIG_X86_64
>+	if (kvm_cpu_cap_has(X86_FEATURE_FRED)) {

ditto

>+		pr_err("FRED host: config=0x%016llx, stack levels=0x%016llx\n"
>+		       "RSP0=0x%016llx, RSP1=0x%016llx\n"
>+		       "RSP2=0x%016llx, RSP3=0x%016llx\n",
>+		       vmcs_read64(HOST_IA32_FRED_CONFIG),
>+		       vmcs_read64(HOST_IA32_FRED_STKLVLS),
>+		       vmx->msr_host_fred_rsp0,
>+		       vmcs_read64(HOST_IA32_FRED_RSP1),
>+		       vmcs_read64(HOST_IA32_FRED_RSP2),
>+		       vmcs_read64(HOST_IA32_FRED_RSP3));
>+	}
>+#endif
> 	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
> 	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
> 	       vmcs_readl(HOST_CR4));
>@@ -6517,25 +6545,29 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
> 	pr_err("*** Control State ***\n");
> 	pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
> 	       cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
>-	pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
>-	       pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
>+	pr_err("PinBased=0x%08x EntryControls=0x%08x\n",
>+	       pin_based_exec_ctrl, vmentry_ctl);
>+	pr_err("ExitControls=0x%08x SecondaryExitControls=0x%016llx\n",
>+	       vmexit_ctl, secondary_vmexit_ctl);
> 	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
> 	       vmcs_read32(EXCEPTION_BITMAP),
> 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
> 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
>-	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
>+	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x event data=%016llx\n",

s/event data/event_data/

> 	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
> 	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
>-	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
>+	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN),
>+	       kvm_cpu_cap_has(X86_FEATURE_FRED) ? vmcs_read64(INJECTED_EVENT_DATA) : 0);

again, it is better to check some vmexit/vmentry ctrl bit.

> 	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
> 	       vmcs_read32(VM_EXIT_INTR_INFO),
> 	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
> 	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
> 	pr_err("        reason=%08x qualification=%016lx\n",
> 	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
>-	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
>+	pr_err("IDTVectoring: info=%08x errcode=%08x event data=%016llx\n",

s/event data/event_data/

> 	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
>-	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
>+	       vmcs_read32(IDT_VECTORING_ERROR_CODE),
>+	       kvm_cpu_cap_has(X86_FEATURE_FRED) ? vmcs_read64(ORIGINAL_EVENT_DATA) : 0);

ditto
On Tue, Apr 30, 2024, Chao Gao wrote:
> On Thu, Feb 08, 2024 at 01:26:35AM +0800, Xin Li wrote:
> >@@ -6449,6 +6451,19 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
> > 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
> > 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
> > 	vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
> >+#ifdef CONFIG_X86_64
> >+	if (kvm_is_fred_enabled(vcpu)) {
>
> FRED MSRs are accessible even if CR4.FRED isn't set and #ifdef is ugly, I think
> you can simply do:
>
> 	if (vmentry_ctrl & VM_ENTRY_LOAD_IA32_FRED)
>
> just like below handling for EFER/PAT etc.

+1
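The "ditto" for the host-side block would follow the same pattern, gating on the VM-exit controls rather than kvm_cpu_cap_has(). A sketch under that reading; the macro name SECONDARY_VM_EXIT_LOAD_IA32_FRED below is a placeholder for whichever secondary VM-exit control loads host FRED state, not a name taken from the series.

	/* Placeholder bit name: the secondary VM-exit "load FRED" control. */
	if (secondary_vmexit_ctl & SECONDARY_VM_EXIT_LOAD_IA32_FRED)
		pr_err("FRED host: config=0x%016llx, stack levels=0x%016llx\n"
		       "RSP0=0x%016llx, RSP1=0x%016llx\n"
		       "RSP2=0x%016llx, RSP3=0x%016llx\n",
		       vmcs_read64(HOST_IA32_FRED_CONFIG),
		       vmcs_read64(HOST_IA32_FRED_STKLVLS),
		       vmx->msr_host_fred_rsp0,
		       vmcs_read64(HOST_IA32_FRED_RSP1),
		       vmcs_read64(HOST_IA32_FRED_RSP2),
		       vmcs_read64(HOST_IA32_FRED_RSP3));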
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a484b9ac2400..e3409607122d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6392,7 +6392,7 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 vmentry_ctl, vmexit_ctl;
 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
-	u64 tertiary_exec_control;
+	u64 tertiary_exec_control, secondary_vmexit_ctl;
 	unsigned long cr4;
 	int efer_slot;
 
@@ -6403,6 +6403,8 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 
 	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
 	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
+	secondary_vmexit_ctl = cpu_has_secondary_vmexit_ctrls() ?
+			       vmcs_read64(SECONDARY_VM_EXIT_CONTROLS) : 0;
 	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
 	cr4 = vmcs_readl(GUEST_CR4);
@@ -6449,6 +6451,19 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
 	vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
+#ifdef CONFIG_X86_64
+	if (kvm_is_fred_enabled(vcpu)) {
+		pr_err("FRED guest: config=0x%016llx, stack levels=0x%016llx\n"
+		       "RSP0=0x%016lx, RSP1=0x%016llx\n"
+		       "RSP2=0x%016llx, RSP3=0x%016llx\n",
+		       vmcs_read64(GUEST_IA32_FRED_CONFIG),
+		       vmcs_read64(GUEST_IA32_FRED_STKLVLS),
+		       read_msr(MSR_IA32_FRED_RSP0),
+		       vmcs_read64(GUEST_IA32_FRED_RSP1),
+		       vmcs_read64(GUEST_IA32_FRED_RSP2),
+		       vmcs_read64(GUEST_IA32_FRED_RSP3));
+	}
+#endif
 	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
 		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
@@ -6496,6 +6511,19 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 	       vmcs_readl(HOST_TR_BASE));
 	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
 	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
+#ifdef CONFIG_X86_64
+	if (kvm_cpu_cap_has(X86_FEATURE_FRED)) {
+		pr_err("FRED host: config=0x%016llx, stack levels=0x%016llx\n"
+		       "RSP0=0x%016llx, RSP1=0x%016llx\n"
+		       "RSP2=0x%016llx, RSP3=0x%016llx\n",
+		       vmcs_read64(HOST_IA32_FRED_CONFIG),
+		       vmcs_read64(HOST_IA32_FRED_STKLVLS),
+		       vmx->msr_host_fred_rsp0,
+		       vmcs_read64(HOST_IA32_FRED_RSP1),
+		       vmcs_read64(HOST_IA32_FRED_RSP2),
+		       vmcs_read64(HOST_IA32_FRED_RSP3));
+	}
+#endif
 	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
 	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
 	       vmcs_readl(HOST_CR4));
@@ -6517,25 +6545,29 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 	pr_err("*** Control State ***\n");
 	pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
 	       cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
-	pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
-	       pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
+	pr_err("PinBased=0x%08x EntryControls=0x%08x\n",
+	       pin_based_exec_ctrl, vmentry_ctl);
+	pr_err("ExitControls=0x%08x SecondaryExitControls=0x%016llx\n",
+	       vmexit_ctl, secondary_vmexit_ctl);
 	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
 	       vmcs_read32(EXCEPTION_BITMAP),
 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
 	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
-	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
+	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x event data=%016llx\n",
 	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
 	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
-	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
+	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN),
+	       kvm_cpu_cap_has(X86_FEATURE_FRED) ? vmcs_read64(INJECTED_EVENT_DATA) : 0);
 	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
 	       vmcs_read32(VM_EXIT_INTR_INFO),
 	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
 	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
 	pr_err("        reason=%08x qualification=%016lx\n",
 	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
-	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
+	pr_err("IDTVectoring: info=%08x errcode=%08x event data=%016llx\n",
 	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
-	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
+	       vmcs_read32(IDT_VECTORING_ERROR_CODE),
+	       kvm_cpu_cap_has(X86_FEATURE_FRED) ? vmcs_read64(ORIGINAL_EVENT_DATA) : 0);
 	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
 	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
 		pr_err("TSC Multiplier = 0x%016llx\n",
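Taken together, the review feedback on the event-data lines amounts to two things: spell "event_data" in the format strings and gate the new VMCS reads on a VM-entry/VM-exit control bit instead of kvm_cpu_cap_has(). A sketch of the VM-entry line under that reading; the review does not name the bit, so VM_ENTRY_LOAD_IA32_FRED is used below only as a stand-in guard.

	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x event_data=%016llx\n",
	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN),
	       /* stand-in guard; the review only asks for "some ctrl bit" */
	       (vmentry_ctl & VM_ENTRY_LOAD_IA32_FRED) ?
			vmcs_read64(INJECTED_EVENT_DATA) : 0);

The IDTVectoring line would be reworked the same way, reading ORIGINAL_EVENT_DATA under the chosen control-bit check.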