Message ID | 20250307212053.2948340-5-pbonzini@redhat.com (mailing list archive) |
---|---|
State | New |
Series | KVM: TDX: TD vcpu enter/exit |
On 3/8/2025 5:20 AM, Paolo Bonzini wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> On entering/exiting TDX vcpu, preserved or clobbered CPU state is different
> from the VMX case. Add TDX hooks to save/restore host/guest CPU state.
> Save/restore kernel GS base MSR.

Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>

> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> Message-ID: <20250129095902.16391-7-adrian.hunter@intel.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/vmx/main.c    | 24 +++++++++++++++++++++--
>  arch/x86/kvm/vmx/tdx.c     | 40 ++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/vmx/x86_ops.h |  4 ++++
>  3 files changed, 66 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 037590fc05e9..c0497ed0c9be 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -145,6 +145,26 @@ static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
>  	vmx_update_cpu_dirty_logging(vcpu);
>  }
>
> +static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
> +{
> +	if (is_td_vcpu(vcpu)) {
> +		tdx_prepare_switch_to_guest(vcpu);
> +		return;
> +	}
> +
> +	vmx_prepare_switch_to_guest(vcpu);
> +}
> +
> +static void vt_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> +	if (is_td_vcpu(vcpu)) {
> +		tdx_vcpu_put(vcpu);
> +		return;
> +	}
> +
> +	vmx_vcpu_put(vcpu);
> +}
> +
>  static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
>  {
>  	if (is_td_vcpu(vcpu))
> @@ -265,9 +285,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
>  	.vcpu_free = vt_vcpu_free,
>  	.vcpu_reset = vt_vcpu_reset,
>
> -	.prepare_switch_to_guest = vmx_prepare_switch_to_guest,
> +	.prepare_switch_to_guest = vt_prepare_switch_to_guest,
>  	.vcpu_load = vt_vcpu_load,
> -	.vcpu_put = vmx_vcpu_put,
> +	.vcpu_put = vt_vcpu_put,
>
>  	.update_exception_bitmap = vmx_update_exception_bitmap,
>  	.get_feature_msr = vmx_get_feature_msr,
> diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
> index f50565f45b6a..94e08fdcb775 100644
> --- a/arch/x86/kvm/vmx/tdx.c
> +++ b/arch/x86/kvm/vmx/tdx.c
> @@ -3,6 +3,7 @@
>  #include <linux/cpu.h>
>  #include <asm/cpufeature.h>
>  #include <linux/misc_cgroup.h>
> +#include <linux/mmu_context.h>
>  #include <asm/tdx.h>
>  #include "capabilities.h"
>  #include "mmu.h"
> @@ -12,6 +13,7 @@
>  #include "vmx.h"
>  #include "mmu/spte.h"
>  #include "common.h"
> +#include "posted_intr.h"
>  #include <trace/events/kvm.h>
>  #include "trace.h"
>
> @@ -624,6 +626,44 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	local_irq_enable();
>  }
>
> +/*
> + * Compared to vmx_prepare_switch_to_guest(), there is not much to do
> + * as SEAMCALL/SEAMRET calls take care of most of save and restore.
> + */
> +void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vt *vt = to_vt(vcpu);
> +
> +	if (vt->guest_state_loaded)
> +		return;
> +
> +	if (likely(is_64bit_mm(current->mm)))
> +		vt->msr_host_kernel_gs_base = current->thread.gsbase;
> +	else
> +		vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
> +
> +	vt->guest_state_loaded = true;
> +}
> +
> +static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vt *vt = to_vt(vcpu);
> +
> +	if (!vt->guest_state_loaded)
> +		return;
> +
> +	++vcpu->stat.host_state_reload;
> +	wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
> +
> +	vt->guest_state_loaded = false;
> +}
> +
> +void tdx_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> +	vmx_vcpu_pi_put(vcpu);
> +	tdx_prepare_switch_to_host(vcpu);
> +}
> +
>  void tdx_vcpu_free(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
> diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
> index 578c26d3aec4..cd18e9b1e124 100644
> --- a/arch/x86/kvm/vmx/x86_ops.h
> +++ b/arch/x86/kvm/vmx/x86_ops.h
> @@ -133,6 +133,8 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu);
>  void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
>  int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
>  fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
> +void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
> +void tdx_vcpu_put(struct kvm_vcpu *vcpu);
>
>  int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
>
> @@ -164,6 +166,8 @@ static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediat
>  {
>  	return EXIT_FASTPATH_NONE;
>  }
> +static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
> +static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
>
>  static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
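For readers without the surrounding tree: the patch combines two techniques, a thin vt_* dispatch layer that branches once on the vCPU type and delegates to the TDX or VMX callback, and a lazy save/restore guarded by guest_state_loaded so the host's MSR_KERNEL_GS_BASE is saved once per vcpu_load()...vcpu_put() cycle rather than on every guest entry. Below is a minimal userspace sketch of that lifecycle; it is an illustrative model only, not kernel code, and names such as vcpu_vt_model and fake_kernel_gs_base are hypothetical stand-ins for the real vcpu_vt/wrmsrl machinery.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend MSR: stands in for the real MSR_KERNEL_GS_BASE. */
static uint64_t fake_kernel_gs_base = 0xffff888000000000ull;

struct vcpu_vt_model {
	bool guest_state_loaded;          /* host state saved for this cycle? */
	uint64_t msr_host_kernel_gs_base; /* saved host value */
	bool is_td;                       /* TDX vs. plain VMX vCPU */
};

/* Models tdx_prepare_switch_to_guest(): save host state once, lazily. */
static void tdx_prepare_switch_to_guest_model(struct vcpu_vt_model *vt)
{
	if (vt->guest_state_loaded)
		return; /* already saved; repeated entries are free */
	vt->msr_host_kernel_gs_base = fake_kernel_gs_base;
	vt->guest_state_loaded = true;
}

/* Models tdx_prepare_switch_to_host(): restore only if something was saved. */
static void tdx_prepare_switch_to_host_model(struct vcpu_vt_model *vt)
{
	if (!vt->guest_state_loaded)
		return;
	fake_kernel_gs_base = vt->msr_host_kernel_gs_base; /* wrmsrl() stand-in */
	vt->guest_state_loaded = false;
}

/* Models the vt_* wrapper: one type check, then delegate. */
static void vt_prepare_switch_to_guest_model(struct vcpu_vt_model *vt)
{
	if (vt->is_td) {
		tdx_prepare_switch_to_guest_model(vt);
		return;
	}
	/* vmx_prepare_switch_to_guest() would run here for non-TD vCPUs. */
}

int main(void)
{
	struct vcpu_vt_model vt = { .is_td = true };

	/* Two entries, one put: the save runs once, the restore runs once. */
	vt_prepare_switch_to_guest_model(&vt);
	vt_prepare_switch_to_guest_model(&vt); /* no-op, state already loaded */
	tdx_prepare_switch_to_host_model(&vt); /* what tdx_vcpu_put() triggers */
	printf("host KERNEL_GS_BASE restored: %#llx\n",
	       (unsigned long long)fake_kernel_gs_base);
	return 0;
}

The same guard is why tdx_prepare_switch_to_guest() can be called on every entry path while the expensive MSR write is deferred to tdx_vcpu_put(), mirroring what vmx_prepare_switch_to_guest()/vmx_vcpu_put() already do for VMX guests.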