Message ID | 20241128004344.4072099-7-seanjc@google.com (mailing list archive)
---|---
State | New
Headers | show
Series | KVM: x86: Prep KVM hypercall handling for TDX | expand
On 28/11/24 02:43, Sean Christopherson wrote: > Rework __kvm_emulate_hypercall() into a macro so that completion of > hypercalls that don't exit to userspace use direct function calls to the > completion helper, i.e. don't trigger a retpoline when RETPOLINE=y. > > Opportunistically take the names of the input registers, as opposed to > taking the input values, to preemptively dedup more of the calling code > (TDX needs to use different registers). Use the direct GPR accessors to > read values to avoid the pointless marking of the registers as available > (KVM requires GPRs to always be available). For TDX, there is an RFC relating to using descriptively named parameters instead of register names for tdh_vp_enter(): https://lore.kernel.org/all/fa817f29-e3ba-4c54-8600-e28cf6ab1953@intel.com/ Please do give some feedback on that approach. Note we need both KVM and x86 maintainer approval for SEAMCALL wrappers like tdh_vp_enter(). As proposed, that ends up with putting the values back into vcpu->arch.regs[] for __kvm_emulate_hypercall() which is not pretty: static int tdx_emulate_vmcall(struct kvm_vcpu *vcpu) { + struct vcpu_tdx *tdx = to_tdx(vcpu); int r; + kvm_r10_write(vcpu, tdx->vp_enter_args.tdcall.fn); + kvm_r11_write(vcpu, tdx->vp_enter_args.tdcall.subfn); + kvm_r12_write(vcpu, tdx->vp_enter_args.tdcall.vmcall.p2); + kvm_r13_write(vcpu, tdx->vp_enter_args.tdcall.vmcall.p3); + kvm_r14_write(vcpu, tdx->vp_enter_args.tdcall.vmcall.p4); + /* * ABI for KVM tdvmcall argument: * In Guest-Hypervisor Communication Interface(GHCI) specification, @@ -1092,13 +1042,12 @@ static int tdx_emulate_vmcall(struct kvm_vcpu *vcpu) * vendor-specific. KVM uses this for KVM hypercall. NOTE: KVM * hypercall number starts from one. Zero isn't used for KVM hypercall * number. - * - * R10: KVM hypercall number - * arguments: R11, R12, R13, R14. 
*/ r = __kvm_emulate_hypercall(vcpu, r10, r11, r12, r13, r14, true, 0, R10, complete_hypercall_exit); + tdvmcall_set_return_code(vcpu, kvm_r10_read(vcpu)); + return r > 0; } > > Signed-off-by: Sean Christopherson <seanjc@google.com> > --- > arch/x86/kvm/x86.c | 29 +++++++++-------------------- > arch/x86/kvm/x86.h | 25 ++++++++++++++++++++----- > 2 files changed, 29 insertions(+), 25 deletions(-) > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 39be2a891ab4..fef8b4e63d25 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -9982,11 +9982,11 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu) > return kvm_skip_emulated_instruction(vcpu); > } > > -int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr, > - unsigned long a0, unsigned long a1, > - unsigned long a2, unsigned long a3, > - int op_64_bit, int cpl, > - int (*complete_hypercall)(struct kvm_vcpu *)) > +int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr, > + unsigned long a0, unsigned long a1, > + unsigned long a2, unsigned long a3, > + int op_64_bit, int cpl, > + int (*complete_hypercall)(struct kvm_vcpu *)) > { > unsigned long ret; > > @@ -10073,32 +10073,21 @@ int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr, > > out: > vcpu->run->hypercall.ret = ret; > - complete_hypercall(vcpu); > return 1; > } > -EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall); > +EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall); > > int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) > { > - unsigned long nr, a0, a1, a2, a3; > - int op_64_bit; > - int cpl; > - > if (kvm_xen_hypercall_enabled(vcpu->kvm)) > return kvm_xen_hypercall(vcpu); > > if (kvm_hv_hypercall_enabled(vcpu)) > return kvm_hv_hypercall(vcpu); > > - nr = kvm_rax_read(vcpu); > - a0 = kvm_rbx_read(vcpu); > - a1 = kvm_rcx_read(vcpu); > - a2 = kvm_rdx_read(vcpu); > - a3 = kvm_rsi_read(vcpu); > - op_64_bit = is_64_bit_hypercall(vcpu); > - cpl = kvm_x86_call(get_cpl)(vcpu); > - > - return 
__kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, > + return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi, > + is_64_bit_hypercall(vcpu), > + kvm_x86_call(get_cpl)(vcpu), > complete_hypercall_exit); > } > EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); > diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h > index 28adc8ea04bf..ad6fe6159dea 100644 > --- a/arch/x86/kvm/x86.h > +++ b/arch/x86/kvm/x86.h > @@ -617,11 +617,26 @@ static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr) > return kvm->arch.hypercall_exit_enabled & BIT(hc_nr); > } > > -int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr, > - unsigned long a0, unsigned long a1, > - unsigned long a2, unsigned long a3, > - int op_64_bit, int cpl, > - int (*complete_hypercall)(struct kvm_vcpu *)); > +int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr, > + unsigned long a0, unsigned long a1, > + unsigned long a2, unsigned long a3, > + int op_64_bit, int cpl, > + int (*complete_hypercall)(struct kvm_vcpu *)); > + > +#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, complete_hypercall) \ > +({ \ > + int __ret; \ > + \ > + __ret = ____kvm_emulate_hypercall(_vcpu, \ > + kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu), \ > + kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu), \ > + kvm_##a3##_read(_vcpu), op_64_bit, cpl, \ > + complete_hypercall); \ > + \ > + if (__ret > 0) \ > + complete_hypercall(_vcpu); \ > + __ret; \ > +}) > > int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); >
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39be2a891ab4..fef8b4e63d25 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9982,11 +9982,11 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
 	return kvm_skip_emulated_instruction(vcpu);
 }
 
-int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
-			    unsigned long a0, unsigned long a1,
-			    unsigned long a2, unsigned long a3,
-			    int op_64_bit, int cpl,
-			    int (*complete_hypercall)(struct kvm_vcpu *))
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+			      unsigned long a0, unsigned long a1,
+			      unsigned long a2, unsigned long a3,
+			      int op_64_bit, int cpl,
+			      int (*complete_hypercall)(struct kvm_vcpu *))
 {
 	unsigned long ret;
 
@@ -10073,32 +10073,21 @@ int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
 
 out:
 	vcpu->run->hypercall.ret = ret;
-	complete_hypercall(vcpu);
 	return 1;
 }
-EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
-	unsigned long nr, a0, a1, a2, a3;
-	int op_64_bit;
-	int cpl;
-
 	if (kvm_xen_hypercall_enabled(vcpu->kvm))
 		return kvm_xen_hypercall(vcpu);
 
 	if (kvm_hv_hypercall_enabled(vcpu))
 		return kvm_hv_hypercall(vcpu);
 
-	nr = kvm_rax_read(vcpu);
-	a0 = kvm_rbx_read(vcpu);
-	a1 = kvm_rcx_read(vcpu);
-	a2 = kvm_rdx_read(vcpu);
-	a3 = kvm_rsi_read(vcpu);
-	op_64_bit = is_64_bit_hypercall(vcpu);
-	cpl = kvm_x86_call(get_cpl)(vcpu);
-
-	return __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl,
+	return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi,
+				       is_64_bit_hypercall(vcpu),
+				       kvm_x86_call(get_cpl)(vcpu),
 				       complete_hypercall_exit);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 28adc8ea04bf..ad6fe6159dea 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -617,11 +617,26 @@ static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
 	return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
 }
 
-int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
-			    unsigned long a0, unsigned long a1,
-			    unsigned long a2, unsigned long a3,
-			    int op_64_bit, int cpl,
-			    int (*complete_hypercall)(struct kvm_vcpu *));
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+			      unsigned long a0, unsigned long a1,
+			      unsigned long a2, unsigned long a3,
+			      int op_64_bit, int cpl,
+			      int (*complete_hypercall)(struct kvm_vcpu *));
+
+#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, complete_hypercall)	\
+({												\
+	int __ret;										\
+												\
+	__ret = ____kvm_emulate_hypercall(_vcpu,						\
+					  kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu),	\
+					  kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu),	\
+					  kvm_##a3##_read(_vcpu), op_64_bit, cpl,		\
+					  complete_hypercall);					\
+												\
+	if (__ret > 0)										\
+		complete_hypercall(_vcpu);							\
+	__ret;											\
+})
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
Rework __kvm_emulate_hypercall() into a macro so that completion of hypercalls that don't exit to userspace use direct function calls to the completion helper, i.e. don't trigger a retpoline when RETPOLINE=y. Opportunistically take the names of the input registers, as opposed to taking the input values, to preemptively dedup more of the calling code (TDX needs to use different registers). Use the direct GPR accessors to read values to avoid the pointless marking of the registers as available (KVM requires GPRs to always be available). Signed-off-by: Sean Christopherson <seanjc@google.com> --- arch/x86/kvm/x86.c | 29 +++++++++-------------------- arch/x86/kvm/x86.h | 25 ++++++++++++++++++++----- 2 files changed, 29 insertions(+), 25 deletions(-)