Message ID | 20250211025828.3072076-2-binbin.wu@linux.intel.com (mailing list archive)
---|---
State | New
Series | KVM: TDX: TDX interrupts
On 2/11/2025 10:58 AM, Binbin Wu wrote:
[...]
> diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
> index 63f66c51975a..f0644d0bbe11 100644
> --- a/arch/x86/kvm/irq.c
> +++ b/arch/x86/kvm/irq.c
> @@ -100,6 +100,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
>  	if (kvm_cpu_has_extint(v))
>  		return 1;
>
> +	if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
> +		return static_call(kvm_x86_protected_apic_has_interrupt)(v);

No functional impact, but this forgot to replace
"static_call(kvm_x86_protected_apic_has_interrupt)(v)" with
"kvm_x86_call(protected_apic_has_interrupt)(v)".

> +
>  	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
>  }
>  EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
[...]
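For illustration, a minimal sketch of the suggested cleanup, assuming KVM's
kvm_x86_call() wrapper around static_call() for kvm_x86_ops hooks (same
behavior as the hunk above, only the call-site spelling changes):

	if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
		/* Equivalent to static_call(kvm_x86_protected_apic_has_interrupt)(v). */
		return kvm_x86_call(protected_apic_has_interrupt)(v);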
>diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
>index 7f1318c44040..2b1ea57a3a4e 100644
>--- a/arch/x86/kvm/vmx/main.c
>+++ b/arch/x86/kvm/vmx/main.c
>@@ -62,6 +62,8 @@ static __init int vt_hardware_setup(void)
> 		vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
> 		vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
> 		vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;

Nit: I think it would be more consistent to set up .protected_apic_has_interrupt
if TDX is enabled (rather than clearing it if TDX is disabled).

>+	} else {
>+		vt_x86_ops.protected_apic_has_interrupt = NULL;
> 	}
>
> 	return 0;
>@@ -371,6 +373,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
> 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
> 	.deliver_interrupt = vmx_deliver_interrupt,
> 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
>+	.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt,
>
> 	.set_tss_addr = vmx_set_tss_addr,
> 	.set_identity_map_addr = vmx_set_identity_map_addr,
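A minimal sketch of that alternative, assuming the quoted assignments sit in
vt_hardware_setup()'s TDX-enabled branch (the enable_tdx condition is an
assumption, since the opening of the branch is not quoted above):

	if (enable_tdx) {
		...
		vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
		/* Hypothetical variant: install the hook only when TDX is enabled,
		 * instead of defaulting it on and NULLing it in the else branch.
		 */
		vt_x86_ops.protected_apic_has_interrupt =
			tdx_protected_apic_has_interrupt;
	}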
On Wed, Feb 12, 2025, Chao Gao wrote:
> >diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> >index 7f1318c44040..2b1ea57a3a4e 100644
> >--- a/arch/x86/kvm/vmx/main.c
> >+++ b/arch/x86/kvm/vmx/main.c
> >@@ -62,6 +62,8 @@ static __init int vt_hardware_setup(void)
> > 		vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
> > 		vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
> > 		vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
>
> Nit: I think it would be more consistent to set up .protected_apic_has_interrupt
> if TDX is enabled (rather than clearing it if TDX is disabled).

I think my preference would be to do the vt_op_tdx_only() thing[*]: wire up all
TDX hooks by default via vt_op_tdx_only(), and then nullify them if TDX support
isn't enabled.  Or even just leave them set, e.g. based on the comment in
vt_hardware_setup(), that can happen anyways.

[*] https://lore.kernel.org/all/Z6v9yjWLNTU6X90d@google.com
On Wed, Feb 12, 2025 at 08:04:49AM -0800, Sean Christopherson wrote:
>On Wed, Feb 12, 2025, Chao Gao wrote:
>> >diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
>> >index 7f1318c44040..2b1ea57a3a4e 100644
>> >--- a/arch/x86/kvm/vmx/main.c
>> >+++ b/arch/x86/kvm/vmx/main.c
>> >@@ -62,6 +62,8 @@ static __init int vt_hardware_setup(void)
>> > 		vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
>> > 		vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
>> > 		vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
>>
>> Nit: I think it would be more consistent to set up .protected_apic_has_interrupt
>> if TDX is enabled (rather than clearing it if TDX is disabled).
>
>I think my preference would be to do the vt_op_tdx_only() thing[*]: wire up all
>TDX hooks by default via vt_op_tdx_only(),

Yes, that makes sense. I am fine as long as the hooks are set up in the same way.

>and then nullify them if TDX support
>isn't enabled.  Or even just leave them set, e.g. based on the comment in
>vt_hardware_setup(), that can happen anyways.

Indeed. No need to nullify the hooks.

>
>[*] https://lore.kernel.org/all/Z6v9yjWLNTU6X90d@google.com
>
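The vt_op_tdx_only() approach is only referenced via the lore link above, so
the following is just a rough sketch of the idea under stated assumptions: the
macro name comes from the discussion, while the CONFIG_KVM_INTEL_TDX guard and
the tdx_##name expansion are guesses, not quoted from the linked mail.

/*
 * Hypothetical sketch: resolve TDX-only hooks at compile time, so the ops
 * table is wired up by default when TDX support is built in and the hook
 * compiles to NULL otherwise, leaving nothing to clear or nullify in
 * vt_hardware_setup().
 */
#ifdef CONFIG_KVM_INTEL_TDX
#define vt_op_tdx_only(name) tdx_##name
#else
#define vt_op_tdx_only(name) NULL
#endif

struct kvm_x86_ops vt_x86_ops __initdata = {
	...
	.protected_apic_has_interrupt =
		vt_op_tdx_only(protected_apic_has_interrupt),
	...
};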
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index d953a454bafb..2eaabff66c82 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -115,6 +115,7 @@ KVM_X86_OP_OPTIONAL(pi_start_assignment)
 KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
 KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
 KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
+KVM_X86_OP_OPTIONAL(protected_apic_has_interrupt)
 KVM_X86_OP_OPTIONAL(set_hv_timer)
 KVM_X86_OP_OPTIONAL(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e855866bf600..ad275b606d68 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1838,6 +1838,7 @@ struct kvm_x86_ops {
 	void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+	bool (*protected_apic_has_interrupt)(struct kvm_vcpu *vcpu);
 
 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
 			    bool *expired);
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 63f66c51975a..f0644d0bbe11 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -100,6 +100,9 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 	if (kvm_cpu_has_extint(v))
 		return 1;
 
+	if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
+		return static_call(kvm_x86_protected_apic_has_interrupt)(v);
+
 	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
 EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a1cbca31ec30..bbdede07d063 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2967,6 +2967,9 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 	if (!kvm_apic_present(vcpu))
 		return -1;
 
+	if (apic->guest_apic_protected)
+		return -1;
+
 	__apic_update_ppr(apic, &ppr);
 	return apic_has_interrupt_for_ppr(apic, ppr);
 }
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 1a8553ebdb42..e33c969439f7 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,6 +65,8 @@ struct kvm_lapic {
 	bool sw_enabled;
 	bool irr_pending;
 	bool lvt0_in_nmi_mode;
+	/* Select registers in the vAPIC cannot be read/written. */
+	bool guest_apic_protected;
 	/* Number of bits set in ISR. */
 	s16 isr_count;
 	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 7f1318c44040..2b1ea57a3a4e 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -62,6 +62,8 @@ static __init int vt_hardware_setup(void)
 		vt_x86_ops.set_external_spte = tdx_sept_set_private_spte;
 		vt_x86_ops.free_external_spt = tdx_sept_free_private_spt;
 		vt_x86_ops.remove_external_spte = tdx_sept_remove_private_spte;
+	} else {
+		vt_x86_ops.protected_apic_has_interrupt = NULL;
 	}
 
 	return 0;
@@ -371,6 +373,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.sync_pir_to_irr = vmx_sync_pir_to_irr,
 	.deliver_interrupt = vmx_deliver_interrupt,
 	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
+	.protected_apic_has_interrupt = tdx_protected_apic_has_interrupt,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 8f3147c6e602..6940ce812730 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -668,6 +668,7 @@ int tdx_vcpu_create(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	fpstate_set_confidential(&vcpu->arch.guest_fpu);
+	vcpu->arch.apic->guest_apic_protected = true;
 
 	vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
 
@@ -709,6 +710,11 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	local_irq_enable();
 }
 
+bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	return pi_has_pending_interrupt(vcpu);
+}
+
 /*
  * Compared to vmx_prepare_switch_to_guest(), there is not much to do
  * as SEAMCALL/SEAMRET calls take care of most of save and restore.
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 92716f6486e9..8086e5c58cd6 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -135,6 +135,7 @@ int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
 fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
 void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void tdx_vcpu_put(struct kvm_vcpu *vcpu);
+bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
 int tdx_handle_exit(struct kvm_vcpu *vcpu,
 		    enum exit_fastpath_completion fastpath);
 void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
@@ -173,6 +174,7 @@ static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediat
 }
 static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
 static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
+static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
 static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion fastpath) { return 0; }
 static inline void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1,