Message ID | 088bc637ef2c0f40f33a3f7c6a8ed0ed844ad111.1625186503.git.isaku.yamahata@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: X86: TDX support | expand |
On 03/07/21 00:05, isaku.yamahata@intel.com wrote: > From: Sean Christopherson <sean.j.christopherson@intel.com> > > Move the guts of vmx_cache_reg() to vt_cache_reg() in preparation for > reusing the bulk of the code for TDX, which can access guest state for > debug TDs. > > Use kvm_x86_ops.cache_reg() in ept_update_paging_mode_cr0() rather than > trying to expose vt_cache_reg() to vmx.c, even though it means taking a > retpoline. The code runs if and only if EPT is enabled but unrestricted > guest. Only one generation of CPU, Nehalem, supports EPT but not > unrestricted guest, and disabling unrestricted guest without also > disabling EPT is, to put it bluntly, dumb. > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> > Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com> > --- > arch/x86/kvm/vmx/main.c | 37 +++++++++++++++++++++++++++++++++++- > arch/x86/kvm/vmx/vmx.c | 42 +---------------------------------------- > 2 files changed, 37 insertions(+), 42 deletions(-) > > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c > index 0d8d2a0a2979..b619615f77de 100644 > --- a/arch/x86/kvm/vmx/main.c > +++ b/arch/x86/kvm/vmx/main.c > @@ -341,7 +341,42 @@ static void vt_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) > > static void vt_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) > { > - vmx_cache_reg(vcpu, reg); > + unsigned long guest_owned_bits; > + > + kvm_register_mark_available(vcpu, reg); > + > + switch (reg) { > + case VCPU_REGS_RSP: > + vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); > + break; > + case VCPU_REGS_RIP: > + vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); > + break; > + case VCPU_EXREG_PDPTR: > + if (enable_ept) > + ept_save_pdptrs(vcpu); > + break; > + case VCPU_EXREG_CR0: > + guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; > + > + vcpu->arch.cr0 &= ~guest_owned_bits; > + vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; > + break; > + case VCPU_EXREG_CR3: > + if (is_unrestricted_guest(vcpu) || > + (enable_ept && is_paging(vcpu))) > + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); > + break; > + case VCPU_EXREG_CR4: > + guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; > + > + vcpu->arch.cr4 &= ~guest_owned_bits; > + vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; > + break; > + default: > + KVM_BUG_ON(1, vcpu->kvm); > + break; > + } > } > > static unsigned long vt_get_rflags(struct kvm_vcpu *vcpu) > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index e315a46d1566..3c3bfc80d2bb 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -2326,46 +2326,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > return ret; > } > > -static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) > -{ > - unsigned long guest_owned_bits; > - > - kvm_register_mark_available(vcpu, reg); > - > - switch (reg) { > - case VCPU_REGS_RSP: > - vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); > - break; > - case VCPU_REGS_RIP: > - vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); > - break; > - case VCPU_EXREG_PDPTR: > - if (enable_ept) > - ept_save_pdptrs(vcpu); > - break; > - case VCPU_EXREG_CR0: > - guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; > - > - vcpu->arch.cr0 &= ~guest_owned_bits; > - vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; > - break; > - case VCPU_EXREG_CR3: > - if (is_unrestricted_guest(vcpu) || > - (enable_ept && is_paging(vcpu))) > - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); > - break; > - case VCPU_EXREG_CR4: > - guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; > - > - vcpu->arch.cr4 &= ~guest_owned_bits; > - vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; > - break; > - default: > - KVM_BUG_ON(1, vcpu->kvm); > - break; > - } > -} > - > static __init int vmx_disabled_by_bios(void) > { > return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || > @@ -3066,7 +3026,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, > struct vcpu_vmx *vmx = to_vmx(vcpu); > > if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) > - vmx_cache_reg(vcpu, VCPU_EXREG_CR3); > + kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3); > if (!(cr0 & X86_CR0_PG)) { > /* From paging/starting to nonpaging */ > exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING | > This shows the problem with #including vmx.c. You should have a .h file for both vmx.h and main.h (e.g. kvm_intel.h), so that here you can just use vt_cache_reg. Paolo
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c index 0d8d2a0a2979..b619615f77de 100644 --- a/arch/x86/kvm/vmx/main.c +++ b/arch/x86/kvm/vmx/main.c @@ -341,7 +341,42 @@ static void vt_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) static void vt_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { - vmx_cache_reg(vcpu, reg); + unsigned long guest_owned_bits; + + kvm_register_mark_available(vcpu, reg); + + switch (reg) { + case VCPU_REGS_RSP: + vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); + break; + case VCPU_REGS_RIP: + vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); + break; + case VCPU_EXREG_PDPTR: + if (enable_ept) + ept_save_pdptrs(vcpu); + break; + case VCPU_EXREG_CR0: + guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; + + vcpu->arch.cr0 &= ~guest_owned_bits; + vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; + break; + case VCPU_EXREG_CR3: + if (is_unrestricted_guest(vcpu) || + (enable_ept && is_paging(vcpu))) + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + break; + case VCPU_EXREG_CR4: + guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; + + vcpu->arch.cr4 &= ~guest_owned_bits; + vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; + break; + default: + KVM_BUG_ON(1, vcpu->kvm); + break; + } } static unsigned long vt_get_rflags(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e315a46d1566..3c3bfc80d2bb 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2326,46 +2326,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return ret; } -static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) -{ - unsigned long guest_owned_bits; - - kvm_register_mark_available(vcpu, reg); - - switch (reg) { - case VCPU_REGS_RSP: - vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); - break; - case VCPU_REGS_RIP: - vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); - break; - case VCPU_EXREG_PDPTR: - if (enable_ept) - ept_save_pdptrs(vcpu); - break; - case VCPU_EXREG_CR0: - guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; - - vcpu->arch.cr0 &= ~guest_owned_bits; - vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; - break; - case VCPU_EXREG_CR3: - if (is_unrestricted_guest(vcpu) || - (enable_ept && is_paging(vcpu))) - vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); - break; - case VCPU_EXREG_CR4: - guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; - - vcpu->arch.cr4 &= ~guest_owned_bits; - vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; - break; - default: - KVM_BUG_ON(1, vcpu->kvm); - break; - } -} - static __init int vmx_disabled_by_bios(void) { return !boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || @@ -3066,7 +3026,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, struct vcpu_vmx *vmx = to_vmx(vcpu); if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) - vmx_cache_reg(vcpu, VCPU_EXREG_CR3); + kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING |