
[v3,4/5] KVM: x86: dump_vmcs should show the effective EFER

Message ID 20210224132919.2467444-5-david.edmondson@oracle.com
Series KVM: x86: dump_vmcs: don't assume GUEST_IA32_EFER, show MSR autoloads/autosaves

Commit Message

David Edmondson Feb. 24, 2021, 1:29 p.m. UTC
If EFER is not being loaded from the VMCS, show the effective value by
reference to the MSR autoload list or by calculation.

Signed-off-by: David Edmondson <david.edmondson@oracle.com>
---
 arch/x86/kvm/vmx/vmx.c | 11 +++++++++++
 1 file changed, 11 insertions(+)
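
As context for the "calculation" mentioned above: when GUEST_IA32_EFER is not
loaded on VM-entry and there is no autoload entry for MSR_EFER, the patch
derives the effective EFER from vcpu->arch.efer and the VM_ENTRY_IA32E_MODE
control. A minimal, self-contained sketch of that derivation follows; the
helper name is illustrative only and not part of the patch.

#include <linux/types.h>
#include <asm/msr-index.h>	/* EFER_LMA, EFER_LME */

/*
 * Sketch only: the effective guest EFER when the VMCS does not load
 * GUEST_IA32_EFER and no MSR-autoload slot holds MSR_EFER.  "efer"
 * stands in for vcpu->arch.efer; "ia32e_mode" for the
 * VM_ENTRY_IA32E_MODE bit of the VM-entry controls.
 */
static u64 effective_guest_efer(u64 efer, bool ia32e_mode)
{
	if (ia32e_mode)
		/* An IA-32e mode entry implies LMA and LME are set. */
		return efer | (EFER_LMA | EFER_LME);

	/* A legacy-mode entry implies LMA and LME are clear. */
	return efer & ~(EFER_LMA | EFER_LME);
}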

Comments

David Edmondson Feb. 24, 2021, 1:33 p.m. UTC | #1
Bah, I will resend the series, as it won't build with patch 4 but
without patch 5.

dme.

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index faeb3d3bd1b8..ed04827a3593 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5815,6 +5815,7 @@ void dump_vmcs(void)
 	u32 vmentry_ctl, vmexit_ctl;
 	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
 	unsigned long cr4;
+	int efer_slot;
 
 	if (!dump_invalid_vmcs) {
 		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
@@ -5860,8 +5861,18 @@ void dump_vmcs(void)
 	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
 	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
 	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
+	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
 		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
+	else if (efer_slot >= 0)
+		pr_err("EFER= 0x%016llx (autoload)\n",
+		       vmx->msr_autoload.guest.val[efer_slot].value);
+	else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
+		pr_err("EFER= 0x%016llx (effective)\n",
+		       vcpu->arch.efer | (EFER_LMA | EFER_LME));
+	else
+		pr_err("EFER= 0x%016llx (effective)\n",
+		       vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
 	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
 		pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
 	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",