| Field | Value |
|---|---|
| Message ID | 20210202165141.88275-2-pbonzini@redhat.com (mailing list archive) |
| State | New, archived |
| Headers | show |
| Series | use kvm_complete_insn_gp more (expand) |
On Tue, Feb 02, 2021, Paolo Bonzini wrote: > Push the injection of #GP up to the callers, so that they can just use > kvm_complete_insn_gp. The SVM and VMX code is identical, IMO we should push all the code to x86.c instead of shuffling it around. I'd also like to change svm_exit_handlers to take @vcpu instead of @svm so that SVM can invoke common handlers directly. If you agree, I'll send a proper series to do the above, plus whatever other cleanups I find, e.g. INVD, WBINVD, etc... diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fa7b2df6422b..bf917efde35c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1530,7 +1530,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 687876211ebe..842a74d88f1b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2334,14 +2334,7 @@ static int wbinvd_interception(struct vcpu_svm *svm) static int xsetbv_interception(struct vcpu_svm *svm) { - u64 new_bv = kvm_read_edx_eax(&svm->vcpu); - u32 index = kvm_rcx_read(&svm->vcpu); - - if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { - return kvm_skip_emulated_instruction(&svm->vcpu); - } - - return 1; + return kvm_emulate_xsetbv(&svm->vcpu); } static int rdpru_interception(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index cf0c397dc3eb..474a169835de 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5218,16 +5218,6 @@ static int 
handle_wbinvd(struct kvm_vcpu *vcpu) return kvm_emulate_wbinvd(vcpu); } -static int handle_xsetbv(struct kvm_vcpu *vcpu) -{ - u64 new_bv = kvm_read_edx_eax(vcpu); - u32 index = kvm_rcx_read(vcpu); - - if (kvm_set_xcr(vcpu, index, new_bv) == 0) - return kvm_skip_emulated_instruction(vcpu); - return 1; -} - static int handle_apic_access(struct kvm_vcpu *vcpu) { if (likely(fasteoi)) { @@ -5689,7 +5679,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, - [EXIT_REASON_XSETBV] = handle_xsetbv, + [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_GDTR_IDTR] = handle_desc, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 14fb8a138ec3..ef630f8d8bd2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -984,16 +984,17 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) return 0; } -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) { if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || - __kvm_set_xcr(vcpu, index, xcr)) { + __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { kvm_inject_gp(vcpu, 0); return 1; } - return 0; + + return kvm_skip_emulated_instruction(vcpu); } -EXPORT_SYMBOL_GPL(kvm_set_xcr); +EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) {
On 02/02/21 18:19, Sean Christopherson wrote: > On Tue, Feb 02, 2021, Paolo Bonzini wrote: >> Push the injection of #GP up to the callers, so that they can just use >> kvm_complete_insn_gp. > > The SVM and VMX code is identical, IMO we should push all the code to x86.c > instead of shuffling it around. > > I'd also like to change svm_exit_handlers to take @vcpu instead of @svm so that > SVM can invoke common handlers directly. > > If you agree, I'll send a proper series to do the above, plus whatever other > cleanups I find, e.g. INVD, WBINVD, etc... Yes, why not. There's a lot of things that are only slightly different between VMX and SVM for no particular reason. Paolo > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h > index fa7b2df6422b..bf917efde35c 100644 > --- a/arch/x86/include/asm/kvm_host.h > +++ b/arch/x86/include/asm/kvm_host.h > @@ -1530,7 +1530,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); > unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); > void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); > void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); > -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); > +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu); > > int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); > int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index 687876211ebe..842a74d88f1b 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -2334,14 +2334,7 @@ static int wbinvd_interception(struct vcpu_svm *svm) > > static int xsetbv_interception(struct vcpu_svm *svm) > { > - u64 new_bv = kvm_read_edx_eax(&svm->vcpu); > - u32 index = kvm_rcx_read(&svm->vcpu); > - > - if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { > - return kvm_skip_emulated_instruction(&svm->vcpu); > - } > - > - return 1; > + return kvm_emulate_xsetbv(&svm->vcpu); > } > > 
static int rdpru_interception(struct vcpu_svm *svm) > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index cf0c397dc3eb..474a169835de 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -5218,16 +5218,6 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu) > return kvm_emulate_wbinvd(vcpu); > } > > -static int handle_xsetbv(struct kvm_vcpu *vcpu) > -{ > - u64 new_bv = kvm_read_edx_eax(vcpu); > - u32 index = kvm_rcx_read(vcpu); > - > - if (kvm_set_xcr(vcpu, index, new_bv) == 0) > - return kvm_skip_emulated_instruction(vcpu); > - return 1; > -} > - > static int handle_apic_access(struct kvm_vcpu *vcpu) > { > if (likely(fasteoi)) { > @@ -5689,7 +5679,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { > [EXIT_REASON_APIC_WRITE] = handle_apic_write, > [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, > [EXIT_REASON_WBINVD] = handle_wbinvd, > - [EXIT_REASON_XSETBV] = handle_xsetbv, > + [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv, > [EXIT_REASON_TASK_SWITCH] = handle_task_switch, > [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, > [EXIT_REASON_GDTR_IDTR] = handle_desc, > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 14fb8a138ec3..ef630f8d8bd2 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -984,16 +984,17 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) > return 0; > } > > -int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) > +int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) > { > if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || > - __kvm_set_xcr(vcpu, index, xcr)) { > + __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { > kvm_inject_gp(vcpu, 0); > return 1; > } > - return 0; > + > + return kvm_skip_emulated_instruction(vcpu); > } > -EXPORT_SYMBOL_GPL(kvm_set_xcr); > +EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); > > bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) > { > >
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 687876211ebe..65d70b9691b4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2337,11 +2337,8 @@ static int xsetbv_interception(struct vcpu_svm *svm) u64 new_bv = kvm_read_edx_eax(&svm->vcpu); u32 index = kvm_rcx_read(&svm->vcpu); - if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { - return kvm_skip_emulated_instruction(&svm->vcpu); - } - - return 1; + int err = kvm_set_xcr(&svm->vcpu, index, new_bv); + return kvm_complete_insn_gp(&svm->vcpu, err); } static int rdpru_interception(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9986a59f71a4..28daceb4f70d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5227,9 +5227,8 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu) u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_rcx_read(vcpu); - if (kvm_set_xcr(vcpu, index, new_bv) == 0) - return kvm_skip_emulated_instruction(vcpu); - return 1; + int err = kvm_set_xcr(vcpu, index, new_bv); + return kvm_complete_insn_gp(vcpu, err); } static int handle_apic_access(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d14230dd38d8..08568c47337c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -986,12 +986,10 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || - __kvm_set_xcr(vcpu, index, xcr)) { - kvm_inject_gp(vcpu, 0); - return 1; - } - return 0; + if (static_call(kvm_x86_get_cpl)(vcpu) == 0) + return __kvm_set_xcr(vcpu, index, xcr); + + return 1; } EXPORT_SYMBOL_GPL(kvm_set_xcr);
Push the injection of #GP up to the callers, so that they can just use kvm_complete_insn_gp. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- arch/x86/kvm/svm/svm.c | 7 ++----- arch/x86/kvm/vmx/vmx.c | 5 ++--- arch/x86/kvm/x86.c | 10 ++++------ 3 files changed, 8 insertions(+), 14 deletions(-)