
[02/17] KVM: monolithic: x86: convert the kvm_x86_ops methods to external functions

Message ID 20190920212509.2578-3-aarcange@redhat.com (mailing list archive)
State New, archived
Series KVM monolithic v1

Commit Message

Andrea Arcangeli Sept. 20, 2019, 9:24 p.m. UTC
This replaces all kvm_x86_ops pointers to functions with regular
external functions that don't require indirect calls.

This is the first commit of a series that aims to replace the modular
kvm.ko kernel module with a monolithic kvm-intel/kvm-amd model. The
only possible downside of this change is that it wastes some disk
space in /lib/modules/. The upside is that it saves CPU cycles and a
small amount of RAM, which are scarcer resources than disk space.

The pointer-to-function virtual template model cannot provide any
runtime benefit because kvm-intel and kvm-amd can't be loaded at the
same time.
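
A minimal sketch of the two dispatch models in plain user-space C
(names are made up for illustration, not the actual series code):

/* Modular model: every operation goes through a function pointer that
 * is filled in at module init time, so each call site is an indirect
 * call (and a retpoline when the spectre v2 mitigation is enabled).
 */
struct x86_ops_sketch {
	int (*hardware_enable)(void);
};

static int vendor_hardware_enable(void)
{
	return 0;
}

static const struct x86_ops_sketch ops_sketch = {
	.hardware_enable = vendor_hardware_enable,
};

/* Monolithic model: one regular external function per operation; the
 * single vendor object linked into kvm-intel/kvm-amd provides the
 * body, so the compiler emits a plain direct call.
 */
int kvm_x86_ops_hardware_enable_sketch(void)
{
	return vendor_hardware_enable();
}

int main(void)
{
	return ops_sketch.hardware_enable() +		/* indirect call */
	       kvm_x86_ops_hardware_enable_sketch();	/* direct call */
}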

In practice this optimization results in a double digit percent
reduction in the vmexit latency with the default retpoline spectre v2
mitigation enabled in the host.

When the host is booted with spectre_v2=off this still results in a
measurable improvement on the order of 1%, depending on the
architecture and workload. Userland workloads in the guest that make
heavy use of the BTB are expected to benefit more.

To reduce rejects while tracking upstream, this doesn't attempt to
remove the kvm_x86_ops structure entirely yet; that is meant for a
later cleanup. The pmu ops have already been cleaned up in this
patchset because they were left completely unused right after the
conversion from pointers to functions to external functions.

Further minor incremental optimizations that weren't possible before
are now enabled by the monolithic model. For example, it would later
be possible to convert some of the small external methods in the
{svm,vmx}_ops.c files into inline functions moved to kvm_ops.h.
However, that will require more Makefile tweaks.
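
A rough sketch of that later step (illustrative only, reusing one of
the forwarders introduced by this patch):

/* Now, in svm_ops.c: an out-of-line forwarder. */
bool kvm_x86_ops_has_wbinvd_exit(void)
{
	return svm_has_wbinvd_exit();
}

/* Possible later cleanup, in kvm_ops.h: the same forwarder as a
 * static inline, so the extra call frame disappears entirely.
 */
static inline bool kvm_x86_ops_has_wbinvd_exit(void)
{
	return svm_has_wbinvd_exit();
}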

This is a list of the most common retpolines executed in KVM on VMX
under a guest workload triggering a high-resolution timer SIGALRM
flood, before the monolithic KVM patchset is applied.

[..]
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    cancel_hv_timer.isra.46+44
    restart_apic_timer+295
    kvm_set_msr_common+1435
    vmx_set_msr+478
    handle_wrmsr+85
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 65382
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+1646
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 66164
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_read_l1_tsc+41
    __kvm_wait_lapic_expire+60
    vmx_vcpu_run.part.88+1091
    vcpu_enter_guest+423
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 66199
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+4958
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 66227
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    restart_apic_timer+99
    kvm_set_msr_common+1435
    vmx_set_msr+478
    handle_wrmsr+85
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 130619
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_read_l1_tsc+41
    vmx_set_hv_timer+81
    restart_apic_timer+99
    kvm_set_msr_common+1435
    vmx_set_msr+478
    handle_wrmsr+85
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 130665
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_skip_emulated_instruction+49
    handle_wrmsr+102
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 131020
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_skip_emulated_instruction+82
    handle_wrmsr+102
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 131025
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    handle_wrmsr+85
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 131043
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    skip_emulated_instruction+48
    kvm_skip_emulated_instruction+82
    handle_wrmsr+102
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 131046
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+4009
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 132405
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rcx+33
    vcpu_enter_guest+1689
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 197697
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vmx_vcpu_run.part.88+358
    vcpu_enter_guest+423
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 198736
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+575
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 198771
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+423
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 198793
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+486
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 198801
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+168
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 198848
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 397680

@total: 3816655

Here is the same measurement on SVM:

[..]
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    clockevents_program_event+148
    hrtimer_start_range_ns+528
    start_sw_timer+356
    restart_apic_timer+111
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 36031
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    lapic_next_event+28
    clockevents_program_event+148
    hrtimer_start_range_ns+528
    start_sw_timer+356
    restart_apic_timer+111
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 36063
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    ktime_get+58
    clockevents_program_event+84
    hrtimer_try_to_cancel+168
    hrtimer_cancel+21
    kvm_set_lapic_tscdeadline_msr+43
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 36134
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    lapic_next_event+28
    clockevents_program_event+148
    hrtimer_try_to_cancel+168
    hrtimer_cancel+21
    kvm_set_lapic_tscdeadline_msr+43
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 36146
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    clockevents_program_event+148
    hrtimer_try_to_cancel+168
    hrtimer_cancel+21
    kvm_set_lapic_tscdeadline_msr+43
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 36190
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    ktime_get+58
    clockevents_program_event+84
    hrtimer_start_range_ns+528
    start_sw_timer+356
    restart_apic_timer+111
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 36281
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+1646
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 37752
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_read_l1_tsc+41
    __kvm_wait_lapic_expire+60
    svm_vcpu_run+1276
]: 37886
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+4958
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 37957
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_read_l1_tsc+41
    start_sw_timer+302
    restart_apic_timer+111
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 74358
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    ktime_get+58
    start_sw_timer+279
    restart_apic_timer+111
    kvm_set_msr_common+1435
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 74558
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_skip_emulated_instruction+82
    msr_interception+356
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 74713
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_skip_emulated_instruction+49
    msr_interception+356
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 74757
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    msr_interception+138
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 74795
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    kvm_get_rflags+28
    svm_interrupt_allowed+50
    vcpu_enter_guest+4009
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 75647
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+4009
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 75812
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rcx+33
    vcpu_enter_guest+1689
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 112579
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+575
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 113371
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+423
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 113386
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+486
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 113414
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+168
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 113601
@[
    trace_retpoline+1
    __trace_retpoline+30
    __x86_indirect_thunk_rax+33
    vcpu_enter_guest+772
    kvm_arch_vcpu_ioctl_run+263
    kvm_vcpu_ioctl+559
    do_vfs_ioctl+164
    ksys_ioctl+96
    __x64_sys_ioctl+22
    do_syscall_64+89
    entry_SYSCALL_64_after_hwframe+68
]: 227076

@total: 3829460

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
---
 arch/x86/include/asm/kvm_ops.h | 166 ++++++++
 arch/x86/kvm/svm_ops.c         | 672 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx_ops.c     | 672 +++++++++++++++++++++++++++++++++
 3 files changed, 1510 insertions(+)
 create mode 100644 arch/x86/include/asm/kvm_ops.h
 create mode 100644 arch/x86/kvm/svm_ops.c
 create mode 100644 arch/x86/kvm/vmx/vmx_ops.c

Comments

Paolo Bonzini Sept. 23, 2019, 10:19 a.m. UTC | #1
On 20/09/19 23:24, Andrea Arcangeli wrote:
> diff --git a/arch/x86/kvm/svm_ops.c b/arch/x86/kvm/svm_ops.c
> new file mode 100644
> index 000000000000..2aaabda92179
> --- /dev/null
> +++ b/arch/x86/kvm/svm_ops.c
> @@ -0,0 +1,672 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + *  arch/x86/kvm/svm_ops.c
> + *
> + *  Copyright 2019 Red Hat, Inc.
> + */
> +
> +int kvm_x86_ops_cpu_has_kvm_support(void)
> +{
> +	return has_svm();
> +}

Can you just rename all the functions in vmx/ and svm.c, instead of
adding forwarders?

Thanks,

Paolo

> +int kvm_x86_ops_disabled_by_bios(void)
> +{
> +	return is_disabled();
> +}
> +
> +int kvm_x86_ops_hardware_enable(void)
> +{
> +	return svm_hardware_enable();
> +}
> +
> +void kvm_x86_ops_hardware_disable(void)
> +{
> +	svm_hardware_disable();
> +}
> +
> +__init int kvm_x86_ops_check_processor_compatibility(void)
> +{
> +	return svm_check_processor_compat();
> +}
> +
> +__init int kvm_x86_ops_hardware_setup(void)
> +{
> +	return svm_hardware_setup();
> +}
> +
> +void kvm_x86_ops_hardware_unsetup(void)
> +{
> +	svm_hardware_unsetup();
> +}
> +
> +bool kvm_x86_ops_cpu_has_accelerated_tpr(void)
> +{
> +	return svm_cpu_has_accelerated_tpr();
> +}
> +
> +bool kvm_x86_ops_has_emulated_msr(int index)
> +{
> +	return svm_has_emulated_msr(index);
> +}
> +
> +void kvm_x86_ops_cpuid_update(struct kvm_vcpu *vcpu)
> +{
> +	svm_cpuid_update(vcpu);
> +}
> +
> +struct kvm *kvm_x86_ops_vm_alloc(void)
> +{
> +	return svm_vm_alloc();
> +}
> +
> +void kvm_x86_ops_vm_free(struct kvm *kvm)
> +{
> +	svm_vm_free(kvm);
> +}
> +
> +int kvm_x86_ops_vm_init(struct kvm *kvm)
> +{
> +	return avic_vm_init(kvm);
> +}
> +
> +void kvm_x86_ops_vm_destroy(struct kvm *kvm)
> +{
> +	svm_vm_destroy(kvm);
> +}
> +
> +struct kvm_vcpu *kvm_x86_ops_vcpu_create(struct kvm *kvm, unsigned id)
> +{
> +	return svm_create_vcpu(kvm, id);
> +}
> +
> +void kvm_x86_ops_vcpu_free(struct kvm_vcpu *vcpu)
> +{
> +	svm_free_vcpu(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> +{
> +	svm_vcpu_reset(vcpu, init_event);
> +}
> +
> +void kvm_x86_ops_prepare_guest_switch(struct kvm_vcpu *vcpu)
> +{
> +	svm_prepare_guest_switch(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	svm_vcpu_load(vcpu, cpu);
> +}
> +
> +void kvm_x86_ops_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> +	svm_vcpu_put(vcpu);
> +}
> +
> +void kvm_x86_ops_update_bp_intercept(struct kvm_vcpu *vcpu)
> +{
> +	update_bp_intercept(vcpu);
> +}
> +
> +int kvm_x86_ops_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> +{
> +	return svm_get_msr(vcpu, msr);
> +}
> +
> +int kvm_x86_ops_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> +{
> +	return svm_set_msr(vcpu, msr);
> +}
> +
> +u64 kvm_x86_ops_get_segment_base(struct kvm_vcpu *vcpu, int seg)
> +{
> +	return svm_get_segment_base(vcpu, seg);
> +}
> +
> +void kvm_x86_ops_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
> +			     int seg)
> +{
> +	svm_get_segment(vcpu, var, seg);
> +}
> +
> +int kvm_x86_ops_get_cpl(struct kvm_vcpu *vcpu)
> +{
> +	return svm_get_cpl(vcpu);
> +}
> +
> +void kvm_x86_ops_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
> +			     int seg)
> +{
> +	svm_set_segment(vcpu, var, seg);
> +}
> +
> +void kvm_x86_ops_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
> +{
> +	kvm_get_cs_db_l_bits(vcpu, db, l);
> +}
> +
> +void kvm_x86_ops_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
> +{
> +	svm_decache_cr0_guest_bits(vcpu);
> +}
> +
> +void kvm_x86_ops_decache_cr3(struct kvm_vcpu *vcpu)
> +{
> +	svm_decache_cr3(vcpu);
> +}
> +
> +void kvm_x86_ops_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
> +{
> +	svm_decache_cr4_guest_bits(vcpu);
> +}
> +
> +void kvm_x86_ops_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
> +{
> +	svm_set_cr0(vcpu, cr0);
> +}
> +
> +void kvm_x86_ops_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
> +{
> +	svm_set_cr3(vcpu, cr3);
> +}
> +
> +int kvm_x86_ops_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> +{
> +	return svm_set_cr4(vcpu, cr4);
> +}
> +
> +void kvm_x86_ops_set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +{
> +	svm_set_efer(vcpu, efer);
> +}
> +
> +void kvm_x86_ops_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	svm_get_idt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	svm_set_idt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	svm_get_gdt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	svm_set_gdt(vcpu, dt);
> +}
> +
> +u64 kvm_x86_ops_get_dr6(struct kvm_vcpu *vcpu)
> +{
> +	return svm_get_dr6(vcpu);
> +}
> +
> +void kvm_x86_ops_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
> +{
> +	svm_set_dr6(vcpu, value);
> +}
> +
> +void kvm_x86_ops_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
> +{
> +	svm_sync_dirty_debug_regs(vcpu);
> +}
> +
> +void kvm_x86_ops_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
> +{
> +	svm_set_dr7(vcpu, value);
> +}
> +
> +void kvm_x86_ops_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
> +{
> +	svm_cache_reg(vcpu, reg);
> +}
> +
> +unsigned long kvm_x86_ops_get_rflags(struct kvm_vcpu *vcpu)
> +{
> +	return svm_get_rflags(vcpu);
> +}
> +
> +void kvm_x86_ops_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
> +{
> +	svm_set_rflags(vcpu, rflags);
> +}
> +
> +void kvm_x86_ops_tlb_flush(struct kvm_vcpu *vcpu, bool invalidate_gpa)
> +{
> +	svm_flush_tlb(vcpu, invalidate_gpa);
> +}
> +
> +int kvm_x86_ops_tlb_remote_flush(struct kvm *kvm)
> +{
> +	return kvm_x86_ops->tlb_remote_flush(kvm);
> +}
> +
> +int kvm_x86_ops_tlb_remote_flush_with_range(struct kvm *kvm,
> +					    struct kvm_tlb_range *range)
> +{
> +	return kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
> +}
> +
> +void kvm_x86_ops_tlb_flush_gva(struct kvm_vcpu *vcpu, gva_t addr)
> +{
> +	svm_flush_tlb_gva(vcpu, addr);
> +}
> +
> +void kvm_x86_ops_run(struct kvm_vcpu *vcpu)
> +{
> +	svm_vcpu_run(vcpu);
> +}
> +
> +int kvm_x86_ops_handle_exit(struct kvm_vcpu *vcpu)
> +{
> +	return handle_exit(vcpu);
> +}
> +
> +int kvm_x86_ops_skip_emulated_instruction(struct kvm_vcpu *vcpu)
> +{
> +	return skip_emulated_instruction(vcpu);
> +}
> +
> +void kvm_x86_ops_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
> +{
> +	svm_set_interrupt_shadow(vcpu, mask);
> +}
> +
> +u32 kvm_x86_ops_get_interrupt_shadow(struct kvm_vcpu *vcpu)
> +{
> +	return svm_get_interrupt_shadow(vcpu);
> +}
> +
> +void kvm_x86_ops_patch_hypercall(struct kvm_vcpu *vcpu,
> +				 unsigned char *hypercall_addr)
> +{
> +	svm_patch_hypercall(vcpu, hypercall_addr);
> +}
> +
> +void kvm_x86_ops_set_irq(struct kvm_vcpu *vcpu)
> +{
> +	svm_set_irq(vcpu);
> +}
> +
> +void kvm_x86_ops_set_nmi(struct kvm_vcpu *vcpu)
> +{
> +	svm_inject_nmi(vcpu);
> +}
> +
> +void kvm_x86_ops_queue_exception(struct kvm_vcpu *vcpu)
> +{
> +	svm_queue_exception(vcpu);
> +}
> +
> +void kvm_x86_ops_cancel_injection(struct kvm_vcpu *vcpu)
> +{
> +	svm_cancel_injection(vcpu);
> +}
> +
> +int kvm_x86_ops_interrupt_allowed(struct kvm_vcpu *vcpu)
> +{
> +	return svm_interrupt_allowed(vcpu);
> +}
> +
> +int kvm_x86_ops_nmi_allowed(struct kvm_vcpu *vcpu)
> +{
> +	return svm_nmi_allowed(vcpu);
> +}
> +
> +bool kvm_x86_ops_get_nmi_mask(struct kvm_vcpu *vcpu)
> +{
> +	return svm_get_nmi_mask(vcpu);
> +}
> +
> +void kvm_x86_ops_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
> +{
> +	svm_set_nmi_mask(vcpu, masked);
> +}
> +
> +void kvm_x86_ops_enable_nmi_window(struct kvm_vcpu *vcpu)
> +{
> +	enable_nmi_window(vcpu);
> +}
> +
> +void kvm_x86_ops_enable_irq_window(struct kvm_vcpu *vcpu)
> +{
> +	enable_irq_window(vcpu);
> +}
> +
> +void kvm_x86_ops_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
> +{
> +	update_cr8_intercept(vcpu, tpr, irr);
> +}
> +
> +bool kvm_x86_ops_get_enable_apicv(struct kvm_vcpu *vcpu)
> +{
> +	return svm_get_enable_apicv(vcpu);
> +}
> +
> +void kvm_x86_ops_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
> +{
> +	svm_refresh_apicv_exec_ctrl(vcpu);
> +}
> +
> +void kvm_x86_ops_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
> +{
> +	svm_hwapic_irr_update(vcpu, max_irr);
> +}
> +
> +void kvm_x86_ops_hwapic_isr_update(struct kvm_vcpu *vcpu, int isr)
> +{
> +	svm_hwapic_isr_update(vcpu, isr);
> +}
> +
> +bool kvm_x86_ops_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_x86_ops->guest_apic_has_interrupt(vcpu);
> +}
> +
> +void kvm_x86_ops_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
> +{
> +	svm_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
> +}
> +
> +void kvm_x86_ops_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
> +{
> +	svm_set_virtual_apic_mode(vcpu);
> +}
> +
> +void kvm_x86_ops_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
> +{
> +	kvm_x86_ops->set_apic_access_page_addr(vcpu, hpa);
> +}
> +
> +void kvm_x86_ops_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
> +{
> +	svm_deliver_avic_intr(vcpu, vector);
> +}
> +
> +int kvm_x86_ops_sync_pir_to_irr(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_lapic_find_highest_irr(vcpu);
> +}
> +
> +int kvm_x86_ops_set_tss_addr(struct kvm *kvm, unsigned int addr)
> +{
> +	return svm_set_tss_addr(kvm, addr);
> +}
> +
> +int kvm_x86_ops_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
> +{
> +	return svm_set_identity_map_addr(kvm, ident_addr);
> +}
> +
> +int kvm_x86_ops_get_tdp_level(struct kvm_vcpu *vcpu)
> +{
> +	return get_npt_level(vcpu);
> +}
> +
> +u64 kvm_x86_ops_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> +{
> +	return svm_get_mt_mask(vcpu, gfn, is_mmio);
> +}
> +
> +int kvm_x86_ops_get_lpage_level(void)
> +{
> +	return svm_get_lpage_level();
> +}
> +
> +bool kvm_x86_ops_rdtscp_supported(void)
> +{
> +	return svm_rdtscp_supported();
> +}
> +
> +bool kvm_x86_ops_invpcid_supported(void)
> +{
> +	return svm_invpcid_supported();
> +}
> +
> +void kvm_x86_ops_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
> +{
> +	set_tdp_cr3(vcpu, cr3);
> +}
> +
> +void kvm_x86_ops_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
> +{
> +	svm_set_supported_cpuid(func, entry);
> +}
> +
> +bool kvm_x86_ops_has_wbinvd_exit(void)
> +{
> +	return svm_has_wbinvd_exit();
> +}
> +
> +u64 kvm_x86_ops_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
> +{
> +	return svm_read_l1_tsc_offset(vcpu);
> +}
> +
> +u64 kvm_x86_ops_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> +{
> +	return svm_write_l1_tsc_offset(vcpu, offset);
> +}
> +
> +void kvm_x86_ops_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
> +{
> +	svm_get_exit_info(vcpu, info1, info2);
> +}
> +
> +int kvm_x86_ops_check_intercept(struct kvm_vcpu *vcpu,
> +				struct x86_instruction_info *info,
> +				enum x86_intercept_stage stage)
> +{
> +	return svm_check_intercept(vcpu, info, stage);
> +}
> +
> +void kvm_x86_ops_handle_exit_irqoff(struct kvm_vcpu *vcpu)
> +{
> +	svm_handle_exit_irqoff(vcpu);
> +}
> +
> +bool kvm_x86_ops_mpx_supported(void)
> +{
> +	return svm_mpx_supported();
> +}
> +
> +bool kvm_x86_ops_xsaves_supported(void)
> +{
> +	return svm_xsaves_supported();
> +}
> +
> +bool kvm_x86_ops_umip_emulated(void)
> +{
> +	return svm_umip_emulated();
> +}
> +
> +bool kvm_x86_ops_pt_supported(void)
> +{
> +	return svm_pt_supported();
> +}
> +
> +int kvm_x86_ops_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
> +{
> +	return kvm_x86_ops->check_nested_events(vcpu, external_intr);
> +}
> +
> +void kvm_x86_ops_request_immediate_exit(struct kvm_vcpu *vcpu)
> +{
> +	__kvm_request_immediate_exit(vcpu);
> +}
> +
> +void kvm_x86_ops_sched_in(struct kvm_vcpu *kvm, int cpu)
> +{
> +	svm_sched_in(kvm, cpu);
> +}
> +
> +void kvm_x86_ops_slot_enable_log_dirty(struct kvm *kvm,
> +				       struct kvm_memory_slot *slot)
> +{
> +	kvm_x86_ops->slot_enable_log_dirty(kvm, slot);
> +}
> +
> +void kvm_x86_ops_slot_disable_log_dirty(struct kvm *kvm,
> +					struct kvm_memory_slot *slot)
> +{
> +	kvm_x86_ops->slot_disable_log_dirty(kvm, slot);
> +}
> +
> +void kvm_x86_ops_flush_log_dirty(struct kvm *kvm)
> +{
> +	kvm_x86_ops->flush_log_dirty(kvm);
> +}
> +
> +void kvm_x86_ops_enable_log_dirty_pt_masked(struct kvm *kvm,
> +					    struct kvm_memory_slot *slot,
> +					    gfn_t offset, unsigned long mask)
> +{
> +	kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, offset, mask);
> +}
> +
> +int kvm_x86_ops_write_log_dirty(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_x86_ops->write_log_dirty(vcpu);
> +}
> +
> +int kvm_x86_ops_pre_block(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_x86_ops->pre_block(vcpu);
> +}
> +
> +void kvm_x86_ops_post_block(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->post_block(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_blocking(struct kvm_vcpu *vcpu)
> +{
> +	svm_vcpu_blocking(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_unblocking(struct kvm_vcpu *vcpu)
> +{
> +	svm_vcpu_unblocking(vcpu);
> +}
> +
> +int kvm_x86_ops_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
> +			       uint32_t guest_irq, bool set)
> +{
> +	return svm_update_pi_irte(kvm, host_irq, guest_irq, set);
> +}
> +
> +void kvm_x86_ops_apicv_post_state_restore(struct kvm_vcpu *vcpu)
> +{
> +	avic_post_state_restore(vcpu);
> +}
> +
> +bool kvm_x86_ops_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
> +{
> +	return svm_dy_apicv_has_pending_interrupt(vcpu);
> +}
> +
> +int kvm_x86_ops_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
> +			     bool *expired)
> +{
> +	return kvm_x86_ops->set_hv_timer(vcpu, guest_deadline_tsc, expired);
> +}
> +
> +void kvm_x86_ops_cancel_hv_timer(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->cancel_hv_timer(vcpu);
> +}
> +
> +void kvm_x86_ops_setup_mce(struct kvm_vcpu *vcpu)
> +{
> +	svm_setup_mce(vcpu);
> +}
> +
> +int kvm_x86_ops_get_nested_state(struct kvm_vcpu *vcpu,
> +				 struct kvm_nested_state __user *user_kvm_nested_state,
> +				 unsigned user_data_size)
> +{
> +	return kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
> +					     user_data_size);
> +}
> +
> +int kvm_x86_ops_set_nested_state(struct kvm_vcpu *vcpu,
> +				 struct kvm_nested_state __user *user_kvm_nested_state,
> +				 struct kvm_nested_state *kvm_state)
> +{
> +	return kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state,
> +					     kvm_state);
> +}
> +
> +void kvm_x86_ops_get_vmcs12_pages(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->get_vmcs12_pages(vcpu);
> +}
> +
> +int kvm_x86_ops_smi_allowed(struct kvm_vcpu *vcpu)
> +{
> +	return svm_smi_allowed(vcpu);
> +}
> +
> +int kvm_x86_ops_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
> +{
> +	return svm_pre_enter_smm(vcpu, smstate);
> +}
> +
> +int kvm_x86_ops_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
> +{
> +	return svm_pre_leave_smm(vcpu, smstate);
> +}
> +
> +int kvm_x86_ops_enable_smi_window(struct kvm_vcpu *vcpu)
> +{
> +	return enable_smi_window(vcpu);
> +}
> +
> +int kvm_x86_ops_mem_enc_op(struct kvm *kvm, void __user *argp)
> +{
> +	return svm_mem_enc_op(kvm, argp);
> +}
> +
> +int kvm_x86_ops_mem_enc_reg_region(struct kvm *kvm,
> +				   struct kvm_enc_region *argp)
> +{
> +	return svm_register_enc_region(kvm, argp);
> +}
> +
> +int kvm_x86_ops_mem_enc_unreg_region(struct kvm *kvm,
> +				     struct kvm_enc_region *argp)
> +{
> +	return svm_unregister_enc_region(kvm, argp);
> +}
> +
> +int kvm_x86_ops_get_msr_feature(struct kvm_msr_entry *entry)
> +{
> +	return svm_get_msr_feature(entry);
> +}
> +
> +int kvm_x86_ops_nested_enable_evmcs(struct kvm_vcpu *vcpu,
> +				    uint16_t *vmcs_version)
> +{
> +	return nested_enable_evmcs(vcpu, vmcs_version);
> +}
> +
> +uint16_t kvm_x86_ops_nested_get_evmcs_version(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_x86_ops->nested_get_evmcs_version(vcpu);
> +}
> +
> +bool kvm_x86_ops_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
> +{
> +	return svm_need_emulation_on_page_fault(vcpu);
> +}
> +
> +bool kvm_x86_ops_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
> +{
> +	return svm_apic_init_signal_blocked(vcpu);
> +}
> diff --git a/arch/x86/kvm/vmx/vmx_ops.c b/arch/x86/kvm/vmx/vmx_ops.c
> new file mode 100644
> index 000000000000..cdcad73935d9
> --- /dev/null
> +++ b/arch/x86/kvm/vmx/vmx_ops.c
> @@ -0,0 +1,672 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + *  arch/x86/kvm/vmx/vmx_ops.c
> + *
> + *  Copyright 2019 Red Hat, Inc.
> + */
> +
> +__init int kvm_x86_ops_cpu_has_kvm_support(void)
> +{
> +	return cpu_has_kvm_support();
> +}
> +
> +__init int kvm_x86_ops_disabled_by_bios(void)
> +{
> +	return vmx_disabled_by_bios();
> +}
> +
> +int kvm_x86_ops_hardware_enable(void)
> +{
> +	return hardware_enable();
> +}
> +
> +void kvm_x86_ops_hardware_disable(void)
> +{
> +	hardware_disable();
> +}
> +
> +__init int kvm_x86_ops_check_processor_compatibility(void)
> +{
> +	return vmx_check_processor_compat();
> +}
> +
> +__init int kvm_x86_ops_hardware_setup(void)
> +{
> +	return hardware_setup();
> +}
> +
> +void kvm_x86_ops_hardware_unsetup(void)
> +{
> +	hardware_unsetup();
> +}
> +
> +bool kvm_x86_ops_cpu_has_accelerated_tpr(void)
> +{
> +	return report_flexpriority();
> +}
> +
> +bool kvm_x86_ops_has_emulated_msr(int index)
> +{
> +	return vmx_has_emulated_msr(index);
> +}
> +
> +void kvm_x86_ops_cpuid_update(struct kvm_vcpu *vcpu)
> +{
> +	vmx_cpuid_update(vcpu);
> +}
> +
> +struct kvm *kvm_x86_ops_vm_alloc(void)
> +{
> +	return vmx_vm_alloc();
> +}
> +
> +void kvm_x86_ops_vm_free(struct kvm *kvm)
> +{
> +	vmx_vm_free(kvm);
> +}
> +
> +int kvm_x86_ops_vm_init(struct kvm *kvm)
> +{
> +	return vmx_vm_init(kvm);
> +}
> +
> +void kvm_x86_ops_vm_destroy(struct kvm *kvm)
> +{
> +	kvm_x86_ops->vm_destroy(kvm);
> +}
> +
> +struct kvm_vcpu *kvm_x86_ops_vcpu_create(struct kvm *kvm, unsigned id)
> +{
> +	return vmx_create_vcpu(kvm, id);
> +}
> +
> +void kvm_x86_ops_vcpu_free(struct kvm_vcpu *vcpu)
> +{
> +	vmx_free_vcpu(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
> +{
> +	vmx_vcpu_reset(vcpu, init_event);
> +}
> +
> +void kvm_x86_ops_prepare_guest_switch(struct kvm_vcpu *vcpu)
> +{
> +	vmx_prepare_switch_to_guest(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> +{
> +	vmx_vcpu_load(vcpu, cpu);
> +}
> +
> +void kvm_x86_ops_vcpu_put(struct kvm_vcpu *vcpu)
> +{
> +	vmx_vcpu_put(vcpu);
> +}
> +
> +void kvm_x86_ops_update_bp_intercept(struct kvm_vcpu *vcpu)
> +{
> +	update_exception_bitmap(vcpu);
> +}
> +
> +int kvm_x86_ops_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> +{
> +	return vmx_get_msr(vcpu, msr);
> +}
> +
> +int kvm_x86_ops_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
> +{
> +	return vmx_set_msr(vcpu, msr);
> +}
> +
> +u64 kvm_x86_ops_get_segment_base(struct kvm_vcpu *vcpu, int seg)
> +{
> +	return vmx_get_segment_base(vcpu, seg);
> +}
> +
> +void kvm_x86_ops_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
> +			     int seg)
> +{
> +	vmx_get_segment(vcpu, var, seg);
> +}
> +
> +int kvm_x86_ops_get_cpl(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_get_cpl(vcpu);
> +}
> +
> +void kvm_x86_ops_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
> +			     int seg)
> +{
> +	vmx_set_segment(vcpu, var, seg);
> +}
> +
> +void kvm_x86_ops_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
> +{
> +	vmx_get_cs_db_l_bits(vcpu, db, l);
> +}
> +
> +void kvm_x86_ops_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
> +{
> +	vmx_decache_cr0_guest_bits(vcpu);
> +}
> +
> +void kvm_x86_ops_decache_cr3(struct kvm_vcpu *vcpu)
> +{
> +	vmx_decache_cr3(vcpu);
> +}
> +
> +void kvm_x86_ops_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
> +{
> +	vmx_decache_cr4_guest_bits(vcpu);
> +}
> +
> +void kvm_x86_ops_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
> +{
> +	vmx_set_cr0(vcpu, cr0);
> +}
> +
> +void kvm_x86_ops_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
> +{
> +	vmx_set_cr3(vcpu, cr3);
> +}
> +
> +int kvm_x86_ops_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
> +{
> +	return vmx_set_cr4(vcpu, cr4);
> +}
> +
> +void kvm_x86_ops_set_efer(struct kvm_vcpu *vcpu, u64 efer)
> +{
> +	vmx_set_efer(vcpu, efer);
> +}
> +
> +void kvm_x86_ops_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	vmx_get_idt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	vmx_set_idt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	vmx_get_gdt(vcpu, dt);
> +}
> +
> +void kvm_x86_ops_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
> +{
> +	vmx_set_gdt(vcpu, dt);
> +}
> +
> +u64 kvm_x86_ops_get_dr6(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_get_dr6(vcpu);
> +}
> +
> +void kvm_x86_ops_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
> +{
> +	vmx_set_dr6(vcpu, value);
> +}
> +
> +void kvm_x86_ops_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
> +{
> +	vmx_sync_dirty_debug_regs(vcpu);
> +}
> +
> +void kvm_x86_ops_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
> +{
> +	vmx_set_dr7(vcpu, value);
> +}
> +
> +void kvm_x86_ops_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
> +{
> +	vmx_cache_reg(vcpu, reg);
> +}
> +
> +unsigned long kvm_x86_ops_get_rflags(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_get_rflags(vcpu);
> +}
> +
> +void kvm_x86_ops_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
> +{
> +	vmx_set_rflags(vcpu, rflags);
> +}
> +
> +void kvm_x86_ops_tlb_flush(struct kvm_vcpu *vcpu, bool invalidate_gpa)
> +{
> +	vmx_flush_tlb(vcpu, invalidate_gpa);
> +}
> +
> +int kvm_x86_ops_tlb_remote_flush(struct kvm *kvm)
> +{
> +	return kvm_x86_ops->tlb_remote_flush(kvm);
> +}
> +
> +int kvm_x86_ops_tlb_remote_flush_with_range(struct kvm *kvm,
> +					    struct kvm_tlb_range *range)
> +{
> +	return kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
> +}
> +
> +void kvm_x86_ops_tlb_flush_gva(struct kvm_vcpu *vcpu, gva_t addr)
> +{
> +	vmx_flush_tlb_gva(vcpu, addr);
> +}
> +
> +void kvm_x86_ops_run(struct kvm_vcpu *vcpu)
> +{
> +	vmx_vcpu_run(vcpu);
> +}
> +
> +int kvm_x86_ops_handle_exit(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_handle_exit(vcpu);
> +}
> +
> +int kvm_x86_ops_skip_emulated_instruction(struct kvm_vcpu *vcpu)
> +{
> +	return __skip_emulated_instruction(vcpu);
> +}
> +
> +void kvm_x86_ops_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
> +{
> +	vmx_set_interrupt_shadow(vcpu, mask);
> +}
> +
> +u32 kvm_x86_ops_get_interrupt_shadow(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_get_interrupt_shadow(vcpu);
> +}
> +
> +void kvm_x86_ops_patch_hypercall(struct kvm_vcpu *vcpu,
> +				 unsigned char *hypercall_addr)
> +{
> +	vmx_patch_hypercall(vcpu, hypercall_addr);
> +}
> +
> +void kvm_x86_ops_set_irq(struct kvm_vcpu *vcpu)
> +{
> +	vmx_inject_irq(vcpu);
> +}
> +
> +void kvm_x86_ops_set_nmi(struct kvm_vcpu *vcpu)
> +{
> +	vmx_inject_nmi(vcpu);
> +}
> +
> +void kvm_x86_ops_queue_exception(struct kvm_vcpu *vcpu)
> +{
> +	vmx_queue_exception(vcpu);
> +}
> +
> +void kvm_x86_ops_cancel_injection(struct kvm_vcpu *vcpu)
> +{
> +	vmx_cancel_injection(vcpu);
> +}
> +
> +int kvm_x86_ops_interrupt_allowed(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_interrupt_allowed(vcpu);
> +}
> +
> +int kvm_x86_ops_nmi_allowed(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_nmi_allowed(vcpu);
> +}
> +
> +bool kvm_x86_ops_get_nmi_mask(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_get_nmi_mask(vcpu);
> +}
> +
> +void kvm_x86_ops_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
> +{
> +	vmx_set_nmi_mask(vcpu, masked);
> +}
> +
> +void kvm_x86_ops_enable_nmi_window(struct kvm_vcpu *vcpu)
> +{
> +	enable_nmi_window(vcpu);
> +}
> +
> +void kvm_x86_ops_enable_irq_window(struct kvm_vcpu *vcpu)
> +{
> +	enable_irq_window(vcpu);
> +}
> +
> +void kvm_x86_ops_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
> +{
> +	update_cr8_intercept(vcpu, tpr, irr);
> +}
> +
> +bool kvm_x86_ops_get_enable_apicv(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_get_enable_apicv(vcpu);
> +}
> +
> +void kvm_x86_ops_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
> +{
> +	vmx_refresh_apicv_exec_ctrl(vcpu);
> +}
> +
> +void kvm_x86_ops_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
> +{
> +	vmx_hwapic_irr_update(vcpu, max_irr);
> +}
> +
> +void kvm_x86_ops_hwapic_isr_update(struct kvm_vcpu *vcpu, int isr)
> +{
> +	vmx_hwapic_isr_update(vcpu, isr);
> +}
> +
> +bool kvm_x86_ops_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_guest_apic_has_interrupt(vcpu);
> +}
> +
> +void kvm_x86_ops_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
> +{
> +	vmx_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
> +}
> +
> +void kvm_x86_ops_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
> +{
> +	vmx_set_virtual_apic_mode(vcpu);
> +}
> +
> +void kvm_x86_ops_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
> +{
> +	vmx_set_apic_access_page_addr(vcpu, hpa);
> +}
> +
> +void kvm_x86_ops_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
> +{
> +	vmx_deliver_posted_interrupt(vcpu, vector);
> +}
> +
> +int kvm_x86_ops_sync_pir_to_irr(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_sync_pir_to_irr(vcpu);
> +}
> +
> +int kvm_x86_ops_set_tss_addr(struct kvm *kvm, unsigned int addr)
> +{
> +	return vmx_set_tss_addr(kvm, addr);
> +}
> +
> +int kvm_x86_ops_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
> +{
> +	return vmx_set_identity_map_addr(kvm, ident_addr);
> +}
> +
> +int kvm_x86_ops_get_tdp_level(struct kvm_vcpu *vcpu)
> +{
> +	return get_ept_level(vcpu);
> +}
> +
> +u64 kvm_x86_ops_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
> +{
> +	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
> +}
> +
> +int kvm_x86_ops_get_lpage_level(void)
> +{
> +	return vmx_get_lpage_level();
> +}
> +
> +bool kvm_x86_ops_rdtscp_supported(void)
> +{
> +	return vmx_rdtscp_supported();
> +}
> +
> +bool kvm_x86_ops_invpcid_supported(void)
> +{
> +	return vmx_invpcid_supported();
> +}
> +
> +void kvm_x86_ops_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
> +{
> +	vmx_set_cr3(vcpu, cr3);
> +}
> +
> +void kvm_x86_ops_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
> +{
> +	vmx_set_supported_cpuid(func, entry);
> +}
> +
> +bool kvm_x86_ops_has_wbinvd_exit(void)
> +{
> +	return cpu_has_vmx_wbinvd_exit();
> +}
> +
> +u64 kvm_x86_ops_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_read_l1_tsc_offset(vcpu);
> +}
> +
> +u64 kvm_x86_ops_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> +{
> +	return vmx_write_l1_tsc_offset(vcpu, offset);
> +}
> +
> +void kvm_x86_ops_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
> +{
> +	vmx_get_exit_info(vcpu, info1, info2);
> +}
> +
> +int kvm_x86_ops_check_intercept(struct kvm_vcpu *vcpu,
> +				struct x86_instruction_info *info,
> +				enum x86_intercept_stage stage)
> +{
> +	return vmx_check_intercept(vcpu, info, stage);
> +}
> +
> +void kvm_x86_ops_handle_exit_irqoff(struct kvm_vcpu *vcpu)
> +{
> +	vmx_handle_exit_irqoff(vcpu);
> +}
> +
> +bool kvm_x86_ops_mpx_supported(void)
> +{
> +	return vmx_mpx_supported();
> +}
> +
> +bool kvm_x86_ops_xsaves_supported(void)
> +{
> +	return vmx_xsaves_supported();
> +}
> +
> +bool kvm_x86_ops_umip_emulated(void)
> +{
> +	return vmx_umip_emulated();
> +}
> +
> +bool kvm_x86_ops_pt_supported(void)
> +{
> +	return vmx_pt_supported();
> +}
> +
> +int kvm_x86_ops_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
> +{
> +	return kvm_x86_ops->check_nested_events(vcpu, external_intr);
> +}
> +
> +void kvm_x86_ops_request_immediate_exit(struct kvm_vcpu *vcpu)
> +{
> +	vmx_request_immediate_exit(vcpu);
> +}
> +
> +void kvm_x86_ops_sched_in(struct kvm_vcpu *kvm, int cpu)
> +{
> +	vmx_sched_in(kvm, cpu);
> +}
> +
> +void kvm_x86_ops_slot_enable_log_dirty(struct kvm *kvm,
> +				       struct kvm_memory_slot *slot)
> +{
> +	vmx_slot_enable_log_dirty(kvm, slot);
> +}
> +
> +void kvm_x86_ops_slot_disable_log_dirty(struct kvm *kvm,
> +					struct kvm_memory_slot *slot)
> +{
> +	vmx_slot_disable_log_dirty(kvm, slot);
> +}
> +
> +void kvm_x86_ops_flush_log_dirty(struct kvm *kvm)
> +{
> +	vmx_flush_log_dirty(kvm);
> +}
> +
> +void kvm_x86_ops_enable_log_dirty_pt_masked(struct kvm *kvm,
> +					    struct kvm_memory_slot *slot,
> +					    gfn_t offset, unsigned long mask)
> +{
> +	vmx_enable_log_dirty_pt_masked(kvm, slot, offset, mask);
> +}
> +
> +int kvm_x86_ops_write_log_dirty(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_write_pml_buffer(vcpu);
> +}
> +
> +int kvm_x86_ops_pre_block(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_pre_block(vcpu);
> +}
> +
> +void kvm_x86_ops_post_block(struct kvm_vcpu *vcpu)
> +{
> +	vmx_post_block(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_blocking(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->vcpu_blocking(vcpu);
> +}
> +
> +void kvm_x86_ops_vcpu_unblocking(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->vcpu_unblocking(vcpu);
> +}
> +
> +int kvm_x86_ops_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
> +			       uint32_t guest_irq, bool set)
> +{
> +	return vmx_update_pi_irte(kvm, host_irq, guest_irq, set);
> +}
> +
> +void kvm_x86_ops_apicv_post_state_restore(struct kvm_vcpu *vcpu)
> +{
> +	vmx_apicv_post_state_restore(vcpu);
> +}
> +
> +bool kvm_x86_ops_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_dy_apicv_has_pending_interrupt(vcpu);
> +}
> +
> +int kvm_x86_ops_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
> +			     bool *expired)
> +{
> +	return vmx_set_hv_timer(vcpu, guest_deadline_tsc, expired);
> +}
> +
> +void kvm_x86_ops_cancel_hv_timer(struct kvm_vcpu *vcpu)
> +{
> +	vmx_cancel_hv_timer(vcpu);
> +}
> +
> +void kvm_x86_ops_setup_mce(struct kvm_vcpu *vcpu)
> +{
> +	vmx_setup_mce(vcpu);
> +}
> +
> +int kvm_x86_ops_get_nested_state(struct kvm_vcpu *vcpu,
> +				 struct kvm_nested_state __user *user_kvm_nested_state,
> +				 unsigned user_data_size)
> +{
> +	return kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
> +					     user_data_size);
> +}
> +
> +int kvm_x86_ops_set_nested_state(struct kvm_vcpu *vcpu,
> +				 struct kvm_nested_state __user *user_kvm_nested_state,
> +				 struct kvm_nested_state *kvm_state)
> +{
> +	return kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state,
> +					     kvm_state);
> +}
> +
> +void kvm_x86_ops_get_vmcs12_pages(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->get_vmcs12_pages(vcpu);
> +}
> +
> +int kvm_x86_ops_smi_allowed(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_smi_allowed(vcpu);
> +}
> +
> +int kvm_x86_ops_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
> +{
> +	return vmx_pre_enter_smm(vcpu, smstate);
> +}
> +
> +int kvm_x86_ops_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
> +{
> +	return vmx_pre_leave_smm(vcpu, smstate);
> +}
> +
> +int kvm_x86_ops_enable_smi_window(struct kvm_vcpu *vcpu)
> +{
> +	return enable_smi_window(vcpu);
> +}
> +
> +int kvm_x86_ops_mem_enc_op(struct kvm *kvm, void __user *argp)
> +{
> +	return kvm_x86_ops->mem_enc_op(kvm, argp);
> +}
> +
> +int kvm_x86_ops_mem_enc_reg_region(struct kvm *kvm,
> +				   struct kvm_enc_region *argp)
> +{
> +	return kvm_x86_ops->mem_enc_reg_region(kvm, argp);
> +}
> +
> +int kvm_x86_ops_mem_enc_unreg_region(struct kvm *kvm,
> +				     struct kvm_enc_region *argp)
> +{
> +	return kvm_x86_ops->mem_enc_unreg_region(kvm, argp);
> +}
> +
> +int kvm_x86_ops_get_msr_feature(struct kvm_msr_entry *entry)
> +{
> +	return vmx_get_msr_feature(entry);
> +}
> +
> +int kvm_x86_ops_nested_enable_evmcs(struct kvm_vcpu *vcpu,
> +				    uint16_t *vmcs_version)
> +{
> +	return kvm_x86_ops->nested_enable_evmcs(vcpu, vmcs_version);
> +}
> +
> +uint16_t kvm_x86_ops_nested_get_evmcs_version(struct kvm_vcpu *vcpu)
> +{
> +	return kvm_x86_ops->nested_get_evmcs_version(vcpu);
> +}
> +
> +bool kvm_x86_ops_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_need_emulation_on_page_fault(vcpu);
> +}
> +
> +bool kvm_x86_ops_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
> +{
> +	return vmx_apic_init_signal_blocked(vcpu);
> +}
>
Sean Christopherson Sept. 23, 2019, 4:13 p.m. UTC | #2
On Mon, Sep 23, 2019 at 12:19:30PM +0200, Paolo Bonzini wrote:
> On 20/09/19 23:24, Andrea Arcangeli wrote:
> > diff --git a/arch/x86/kvm/svm_ops.c b/arch/x86/kvm/svm_ops.c
> > new file mode 100644
> > index 000000000000..2aaabda92179
> > --- /dev/null
> > +++ b/arch/x86/kvm/svm_ops.c
> > @@ -0,0 +1,672 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +/*
> > + *  arch/x86/kvm/svm_ops.c
> > + *
> > + *  Copyright 2019 Red Hat, Inc.
> > + */
> > +
> > +int kvm_x86_ops_cpu_has_kvm_support(void)
> > +{
> > +	return has_svm();
> > +}
> 
> Can you just rename all the functions in vmx/ and svm.c, instead of
> adding forwarders?

Yeah, having kvm_x86_ be analogous to kvm_arch_ seems like the obvious
approach.  The necessary VMX and SVM renaming can be done in separate
preparatory patches, and the conversion from kvm_x86_ops to direct calls
would be fairly straightforward.

Alternatively, what if we use macros in the call sites, e.g. keep/require
vmx_ and svm_ prefixes for all functions, renaming VMX and SVM code as
needed?  E.g.:

  cpu_has_vmx_support -> vmx_supported_by_cpu 
  cpu_has_svm_support -> svm_supported_by_cpu

  int vmx_disabled_by_bios(void)
  int svm_disabled_by_bios(void)


  #define X86_OP(name) kvm_x86_vendor##_##name

  int kvm_arch_init(void *opaque)
  {
	if (X86_OP(supported_by_cpu())) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (X86_OP(disabled_by_bios())) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}	
  }

Pros:
  - Smaller patches due to less renaming in VMX and SVM
  - Calls to vendor code are very obvious
  - Stack traces contain vmx vs. svm instead of kvm_x86

Cons:
  - Macros
  - Annoying development environment, e.g. editors tend to struggle with
    macrofied function/variable names.
Paolo Bonzini Sept. 23, 2019, 4:51 p.m. UTC | #3
On 23/09/19 18:13, Sean Christopherson wrote:
> Alternatively, what if we use macros in the call sites, e.g. keep/require
> vmx_ and svm_ prefixes for all functions, renaming VMX and SVM code as
> needed?  E.g.:
> 
> 
>   #define X86_OP(name) kvm_x86_vendor##_##name
> 
>   int kvm_arch_init(void *opaque)
>   {
> 	if (X86_OP(supported_by_cpu())) {

Please no, the extra parentheses would be a mess to review.

Paolo
Andrea Arcangeli Sept. 23, 2019, 7:21 p.m. UTC | #4
On Mon, Sep 23, 2019 at 12:19:30PM +0200, Paolo Bonzini wrote:
> On 20/09/19 23:24, Andrea Arcangeli wrote:
> > diff --git a/arch/x86/kvm/svm_ops.c b/arch/x86/kvm/svm_ops.c
> > new file mode 100644
> > index 000000000000..2aaabda92179
> > --- /dev/null
> > +++ b/arch/x86/kvm/svm_ops.c
> > @@ -0,0 +1,672 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +/*
> > + *  arch/x86/kvm/svm_ops.c
> > + *
> > + *  Copyright 2019 Red Hat, Inc.
> > + */
> > +
> > +int kvm_x86_ops_cpu_has_kvm_support(void)
> > +{
> > +	return has_svm();
> > +}
> 
> Can you just rename all the functions in vmx/ and svm.c, instead of
> adding forwarders?

I can do that. I thought this was cleaner as it still kept the
abstraction separate from the rest of the vmx/svm code rather than
mixed into it, but it'll work the same by dropping the abstraction in
kvm_ops.h and just maintaining a common name between the svm.c and
vmx.c files; gcc already built it that way after all.
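
A rough sketch of that common-name alternative (illustrative names
only): both vendor files define the same external symbol and only one
of them ends up linked into a given module, so all call sites remain
direct calls with no forwarder in between.

/* svm.c (sketch): the former svm_hardware_enable() simply takes the
 * common name.
 */
int kvm_x86_hardware_enable(void)
{
	/* ... SVM-specific implementation ... */
	return 0;
}

/* vmx.c (sketch): the former hardware_enable() takes the same name;
 * kvm-amd links the svm object, kvm-intel links the vmx one.
 */
int kvm_x86_hardware_enable(void)
{
	/* ... VMX-specific implementation ... */
	return 0;
}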

Patch

diff --git a/arch/x86/include/asm/kvm_ops.h b/arch/x86/include/asm/kvm_ops.h
new file mode 100644
index 000000000000..1ec5380c9b67
--- /dev/null
+++ b/arch/x86/include/asm/kvm_ops.h
@@ -0,0 +1,166 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_KVM_OPS_H
+#define _ASM_X86_KVM_OPS_H
+
+extern __init int kvm_x86_ops_cpu_has_kvm_support(void);
+extern __init int kvm_x86_ops_disabled_by_bios(void);
+extern int kvm_x86_ops_hardware_enable(void);
+extern void kvm_x86_ops_hardware_disable(void);
+extern __init int kvm_x86_ops_check_processor_compatibility(void);
+extern __init int kvm_x86_ops_hardware_setup(void);
+extern void kvm_x86_ops_hardware_unsetup(void);
+extern bool kvm_x86_ops_cpu_has_accelerated_tpr(void);
+extern bool kvm_x86_ops_has_emulated_msr(int index);
+extern void kvm_x86_ops_cpuid_update(struct kvm_vcpu *vcpu);
+extern struct kvm *kvm_x86_ops_vm_alloc(void);
+extern void kvm_x86_ops_vm_free(struct kvm *kvm);
+extern int kvm_x86_ops_vm_init(struct kvm *kvm);
+extern void kvm_x86_ops_vm_destroy(struct kvm *kvm);
+extern struct kvm_vcpu *kvm_x86_ops_vcpu_create(struct kvm *kvm, unsigned id);
+extern void kvm_x86_ops_vcpu_free(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+extern void kvm_x86_ops_prepare_guest_switch(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_x86_ops_vcpu_put(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_update_bp_intercept(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+extern int kvm_x86_ops_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+extern u64 kvm_x86_ops_get_segment_base(struct kvm_vcpu *vcpu, int seg);
+extern void kvm_x86_ops_get_segment(struct kvm_vcpu *vcpu,
+				    struct kvm_segment *var, int seg);
+extern int kvm_x86_ops_get_cpl(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_segment(struct kvm_vcpu *vcpu,
+				    struct kvm_segment *var, int seg);
+extern void kvm_x86_ops_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db,
+					 int *l);
+extern void kvm_x86_ops_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_decache_cr3(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_decache_cr4_guest_bits(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+extern void kvm_x86_ops_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+extern int kvm_x86_ops_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+extern void kvm_x86_ops_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+extern void kvm_x86_ops_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+extern void kvm_x86_ops_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+extern void kvm_x86_ops_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+extern void kvm_x86_ops_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+extern u64 kvm_x86_ops_get_dr6(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_dr6(struct kvm_vcpu *vcpu, unsigned long value);
+extern void kvm_x86_ops_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_dr7(struct kvm_vcpu *vcpu, unsigned long value);
+extern void kvm_x86_ops_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+extern unsigned long kvm_x86_ops_get_rflags(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_rflags(struct kvm_vcpu *vcpu,
+				   unsigned long rflags);
+extern void kvm_x86_ops_tlb_flush(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+extern int kvm_x86_ops_tlb_remote_flush(struct kvm *kvm);
+extern int kvm_x86_ops_tlb_remote_flush_with_range(struct kvm *kvm,
+						   struct kvm_tlb_range *range);
+extern void kvm_x86_ops_tlb_flush_gva(struct kvm_vcpu *vcpu, gva_t addr);
+extern void kvm_x86_ops_run(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_handle_exit(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
+extern u32 kvm_x86_ops_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_patch_hypercall(struct kvm_vcpu *vcpu,
+					unsigned char *hypercall_addr);
+extern void kvm_x86_ops_set_irq(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_nmi(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_queue_exception(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_cancel_injection(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_interrupt_allowed(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_nmi_allowed(struct kvm_vcpu *vcpu);
+extern bool kvm_x86_ops_get_nmi_mask(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+extern void kvm_x86_ops_enable_nmi_window(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_enable_irq_window(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr,
+					     int irr);
+extern bool kvm_x86_ops_get_enable_apicv(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
+extern void kvm_x86_ops_hwapic_isr_update(struct kvm_vcpu *vcpu, int isr);
+extern bool kvm_x86_ops_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_load_eoi_exitmap(struct kvm_vcpu *vcpu,
+					 u64 *eoi_exit_bitmap);
+extern void kvm_x86_ops_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_set_apic_access_page_addr(struct kvm_vcpu *vcpu,
+						  hpa_t hpa);
+extern void kvm_x86_ops_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
+						 int vector);
+extern int kvm_x86_ops_sync_pir_to_irr(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_set_tss_addr(struct kvm *kvm, unsigned int addr);
+extern int kvm_x86_ops_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
+extern int kvm_x86_ops_get_tdp_level(struct kvm_vcpu *vcpu);
+extern u64 kvm_x86_ops_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn,
+				   bool is_mmio);
+extern int kvm_x86_ops_get_lpage_level(void);
+extern bool kvm_x86_ops_rdtscp_supported(void);
+extern bool kvm_x86_ops_invpcid_supported(void);
+extern void kvm_x86_ops_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+extern void kvm_x86_ops_set_supported_cpuid(u32 func,
+					    struct kvm_cpuid_entry2 *entry);
+extern bool kvm_x86_ops_has_wbinvd_exit(void);
+extern u64 kvm_x86_ops_read_l1_tsc_offset(struct kvm_vcpu *vcpu);
+extern u64 kvm_x86_ops_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset);
+extern void kvm_x86_ops_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1,
+				      u64 *info2);
+extern int kvm_x86_ops_check_intercept(struct kvm_vcpu *vcpu,
+				       struct x86_instruction_info *info,
+				       enum x86_intercept_stage stage);
+extern void kvm_x86_ops_handle_exit_irqoff(struct kvm_vcpu *vcpu);
+extern bool kvm_x86_ops_mpx_supported(void);
+extern bool kvm_x86_ops_xsaves_supported(void);
+extern bool kvm_x86_ops_umip_emulated(void);
+extern bool kvm_x86_ops_pt_supported(void);
+extern int kvm_x86_ops_check_nested_events(struct kvm_vcpu *vcpu,
+					   bool external_intr);
+extern void kvm_x86_ops_request_immediate_exit(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_sched_in(struct kvm_vcpu *kvm, int cpu);
+extern void kvm_x86_ops_slot_enable_log_dirty(struct kvm *kvm,
+					      struct kvm_memory_slot *slot);
+extern void kvm_x86_ops_slot_disable_log_dirty(struct kvm *kvm,
+					       struct kvm_memory_slot *slot);
+extern void kvm_x86_ops_flush_log_dirty(struct kvm *kvm);
+extern void kvm_x86_ops_enable_log_dirty_pt_masked(struct kvm *kvm,
+						   struct kvm_memory_slot *slot,
+						   gfn_t offset,
+						   unsigned long mask);
+extern int kvm_x86_ops_write_log_dirty(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_pre_block(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_post_block(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_vcpu_blocking(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_vcpu_unblocking(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+				      uint32_t guest_irq, bool set);
+extern void kvm_x86_ops_apicv_post_state_restore(struct kvm_vcpu *vcpu);
+extern bool kvm_x86_ops_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_set_hv_timer(struct kvm_vcpu *vcpu,
+				    u64 guest_deadline_tsc, bool *expired);
+extern void kvm_x86_ops_cancel_hv_timer(struct kvm_vcpu *vcpu);
+extern void kvm_x86_ops_setup_mce(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_get_nested_state(struct kvm_vcpu *vcpu,
+					struct kvm_nested_state __user *user_kvm_nested_state,
+					unsigned user_data_size);
+extern int kvm_x86_ops_set_nested_state(struct kvm_vcpu *vcpu,
+					struct kvm_nested_state __user *user_kvm_nested_state,
+					struct kvm_nested_state *kvm_state);
+extern void kvm_x86_ops_get_vmcs12_pages(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_smi_allowed(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate);
+extern int kvm_x86_ops_pre_leave_smm(struct kvm_vcpu *vcpu,
+				     const char *smstate);
+extern int kvm_x86_ops_enable_smi_window(struct kvm_vcpu *vcpu);
+extern int kvm_x86_ops_mem_enc_op(struct kvm *kvm, void __user *argp);
+extern int kvm_x86_ops_mem_enc_reg_region(struct kvm *kvm,
+					  struct kvm_enc_region *argp);
+extern int kvm_x86_ops_mem_enc_unreg_region(struct kvm *kvm,
+					    struct kvm_enc_region *argp);
+extern int kvm_x86_ops_get_msr_feature(struct kvm_msr_entry *entry);
+extern int kvm_x86_ops_nested_enable_evmcs(struct kvm_vcpu *vcpu,
+					   uint16_t *vmcs_version);
+extern uint16_t kvm_x86_ops_nested_get_evmcs_version(struct kvm_vcpu *vcpu);
+extern bool kvm_x86_ops_need_emulation_on_page_fault(struct kvm_vcpu *vcpu);
+extern bool kvm_x86_ops_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
+
+#endif /* _ASM_X86_KVM_OPS_H */
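
For reference, a minimal sketch of what the conversion to these externs looks like at a call site; the wrapper name is taken from the declarations above, while the surrounding caller is hypothetical and not part of this patch:

	/* Old style: indirect call through the kvm_x86_ops struct, which the
	 * compiler emits as an indirect branch (a retpoline thunk when that
	 * mitigation is enabled).
	 */
	static int msr_write_indirect(struct kvm_vcpu *vcpu, struct msr_data *msr)
	{
		return kvm_x86_ops->set_msr(vcpu, msr);
	}

	/* New style: direct call to the external wrapper declared above,
	 * resolved at link time to the svm_ops.c or vmx_ops.c definition.
	 */
	static int msr_write_direct(struct kvm_vcpu *vcpu, struct msr_data *msr)
	{
		return kvm_x86_ops_set_msr(vcpu, msr);
	}
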
diff --git a/arch/x86/kvm/svm_ops.c b/arch/x86/kvm/svm_ops.c
new file mode 100644
index 000000000000..2aaabda92179
--- /dev/null
+++ b/arch/x86/kvm/svm_ops.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *  arch/x86/kvm/svm_ops.c
+ *
+ *  Copyright 2019 Red Hat, Inc.
+ */
+
+int kvm_x86_ops_cpu_has_kvm_support(void)
+{
+	return has_svm();
+}
+
+int kvm_x86_ops_disabled_by_bios(void)
+{
+	return is_disabled();
+}
+
+int kvm_x86_ops_hardware_enable(void)
+{
+	return svm_hardware_enable();
+}
+
+void kvm_x86_ops_hardware_disable(void)
+{
+	svm_hardware_disable();
+}
+
+__init int kvm_x86_ops_check_processor_compatibility(void)
+{
+	return svm_check_processor_compat();
+}
+
+__init int kvm_x86_ops_hardware_setup(void)
+{
+	return svm_hardware_setup();
+}
+
+void kvm_x86_ops_hardware_unsetup(void)
+{
+	svm_hardware_unsetup();
+}
+
+bool kvm_x86_ops_cpu_has_accelerated_tpr(void)
+{
+	return svm_cpu_has_accelerated_tpr();
+}
+
+bool kvm_x86_ops_has_emulated_msr(int index)
+{
+	return svm_has_emulated_msr(index);
+}
+
+void kvm_x86_ops_cpuid_update(struct kvm_vcpu *vcpu)
+{
+	svm_cpuid_update(vcpu);
+}
+
+struct kvm *kvm_x86_ops_vm_alloc(void)
+{
+	return svm_vm_alloc();
+}
+
+void kvm_x86_ops_vm_free(struct kvm *kvm)
+{
+	svm_vm_free(kvm);
+}
+
+int kvm_x86_ops_vm_init(struct kvm *kvm)
+{
+	return avic_vm_init(kvm);
+}
+
+void kvm_x86_ops_vm_destroy(struct kvm *kvm)
+{
+	svm_vm_destroy(kvm);
+}
+
+struct kvm_vcpu *kvm_x86_ops_vcpu_create(struct kvm *kvm, unsigned id)
+{
+	return svm_create_vcpu(kvm, id);
+}
+
+void kvm_x86_ops_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	svm_free_vcpu(vcpu);
+}
+
+void kvm_x86_ops_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+	svm_vcpu_reset(vcpu, init_event);
+}
+
+void kvm_x86_ops_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+	svm_prepare_guest_switch(vcpu);
+}
+
+void kvm_x86_ops_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	svm_vcpu_load(vcpu, cpu);
+}
+
+void kvm_x86_ops_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	svm_vcpu_put(vcpu);
+}
+
+void kvm_x86_ops_update_bp_intercept(struct kvm_vcpu *vcpu)
+{
+	update_bp_intercept(vcpu);
+}
+
+int kvm_x86_ops_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	return svm_get_msr(vcpu, msr);
+}
+
+int kvm_x86_ops_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	return svm_set_msr(vcpu, msr);
+}
+
+u64 kvm_x86_ops_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+	return svm_get_segment_base(vcpu, seg);
+}
+
+void kvm_x86_ops_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+			     int seg)
+{
+	svm_get_segment(vcpu, var, seg);
+}
+
+int kvm_x86_ops_get_cpl(struct kvm_vcpu *vcpu)
+{
+	return svm_get_cpl(vcpu);
+}
+
+void kvm_x86_ops_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+			     int seg)
+{
+	svm_set_segment(vcpu, var, seg);
+}
+
+void kvm_x86_ops_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+	kvm_get_cs_db_l_bits(vcpu, db, l);
+}
+
+void kvm_x86_ops_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+	svm_decache_cr0_guest_bits(vcpu);
+}
+
+void kvm_x86_ops_decache_cr3(struct kvm_vcpu *vcpu)
+{
+	svm_decache_cr3(vcpu);
+}
+
+void kvm_x86_ops_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+	svm_decache_cr4_guest_bits(vcpu);
+}
+
+void kvm_x86_ops_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	svm_set_cr0(vcpu, cr0);
+}
+
+void kvm_x86_ops_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	svm_set_cr3(vcpu, cr3);
+}
+
+int kvm_x86_ops_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return svm_set_cr4(vcpu, cr4);
+}
+
+void kvm_x86_ops_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	svm_set_efer(vcpu, efer);
+}
+
+void kvm_x86_ops_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	svm_get_idt(vcpu, dt);
+}
+
+void kvm_x86_ops_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	svm_set_idt(vcpu, dt);
+}
+
+void kvm_x86_ops_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	svm_get_gdt(vcpu, dt);
+}
+
+void kvm_x86_ops_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	svm_set_gdt(vcpu, dt);
+}
+
+u64 kvm_x86_ops_get_dr6(struct kvm_vcpu *vcpu)
+{
+	return svm_get_dr6(vcpu);
+}
+
+void kvm_x86_ops_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
+{
+	svm_set_dr6(vcpu, value);
+}
+
+void kvm_x86_ops_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+	svm_sync_dirty_debug_regs(vcpu);
+}
+
+void kvm_x86_ops_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
+{
+	svm_set_dr7(vcpu, value);
+}
+
+void kvm_x86_ops_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	svm_cache_reg(vcpu, reg);
+}
+
+unsigned long kvm_x86_ops_get_rflags(struct kvm_vcpu *vcpu)
+{
+	return svm_get_rflags(vcpu);
+}
+
+void kvm_x86_ops_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+	svm_set_rflags(vcpu, rflags);
+}
+
+void kvm_x86_ops_tlb_flush(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+	svm_flush_tlb(vcpu, invalidate_gpa);
+}
+
+int kvm_x86_ops_tlb_remote_flush(struct kvm *kvm)
+{
+	return kvm_x86_ops->tlb_remote_flush(kvm);
+}
+
+int kvm_x86_ops_tlb_remote_flush_with_range(struct kvm *kvm,
+					    struct kvm_tlb_range *range)
+{
+	return kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+}
+
+void kvm_x86_ops_tlb_flush_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	svm_flush_tlb_gva(vcpu, addr);
+}
+
+void kvm_x86_ops_run(struct kvm_vcpu *vcpu)
+{
+	svm_vcpu_run(vcpu);
+}
+
+int kvm_x86_ops_handle_exit(struct kvm_vcpu *vcpu)
+{
+	return handle_exit(vcpu);
+}
+
+int kvm_x86_ops_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+	return skip_emulated_instruction(vcpu);
+}
+
+void kvm_x86_ops_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+{
+	svm_set_interrupt_shadow(vcpu, mask);
+}
+
+u32 kvm_x86_ops_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	return svm_get_interrupt_shadow(vcpu);
+}
+
+void kvm_x86_ops_patch_hypercall(struct kvm_vcpu *vcpu,
+				 unsigned char *hypercall_addr)
+{
+	svm_patch_hypercall(vcpu, hypercall_addr);
+}
+
+void kvm_x86_ops_set_irq(struct kvm_vcpu *vcpu)
+{
+	svm_set_irq(vcpu);
+}
+
+void kvm_x86_ops_set_nmi(struct kvm_vcpu *vcpu)
+{
+	svm_inject_nmi(vcpu);
+}
+
+void kvm_x86_ops_queue_exception(struct kvm_vcpu *vcpu)
+{
+	svm_queue_exception(vcpu);
+}
+
+void kvm_x86_ops_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	svm_cancel_injection(vcpu);
+}
+
+int kvm_x86_ops_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+	return svm_interrupt_allowed(vcpu);
+}
+
+int kvm_x86_ops_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+	return svm_nmi_allowed(vcpu);
+}
+
+bool kvm_x86_ops_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	return svm_get_nmi_mask(vcpu);
+}
+
+void kvm_x86_ops_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+	svm_set_nmi_mask(vcpu, masked);
+}
+
+void kvm_x86_ops_enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+	enable_nmi_window(vcpu);
+}
+
+void kvm_x86_ops_enable_irq_window(struct kvm_vcpu *vcpu)
+{
+	enable_irq_window(vcpu);
+}
+
+void kvm_x86_ops_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+	update_cr8_intercept(vcpu, tpr, irr);
+}
+
+bool kvm_x86_ops_get_enable_apicv(struct kvm_vcpu *vcpu)
+{
+	return svm_get_enable_apicv(vcpu);
+}
+
+void kvm_x86_ops_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+	svm_refresh_apicv_exec_ctrl(vcpu);
+}
+
+void kvm_x86_ops_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+	svm_hwapic_irr_update(vcpu, max_irr);
+}
+
+void kvm_x86_ops_hwapic_isr_update(struct kvm_vcpu *vcpu, int isr)
+{
+	svm_hwapic_isr_update(vcpu, isr);
+}
+
+bool kvm_x86_ops_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	return kvm_x86_ops->guest_apic_has_interrupt(vcpu);
+}
+
+void kvm_x86_ops_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	svm_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
+void kvm_x86_ops_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+	svm_set_virtual_apic_mode(vcpu);
+}
+
+void kvm_x86_ops_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+	kvm_x86_ops->set_apic_access_page_addr(vcpu, hpa);
+}
+
+void kvm_x86_ops_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+	svm_deliver_avic_intr(vcpu, vector);
+}
+
+int kvm_x86_ops_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	return kvm_lapic_find_highest_irr(vcpu);
+}
+
+int kvm_x86_ops_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+	return svm_set_tss_addr(kvm, addr);
+}
+
+int kvm_x86_ops_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+	return svm_set_identity_map_addr(kvm, ident_addr);
+}
+
+int kvm_x86_ops_get_tdp_level(struct kvm_vcpu *vcpu)
+{
+	return get_npt_level(vcpu);
+}
+
+u64 kvm_x86_ops_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	return svm_get_mt_mask(vcpu, gfn, is_mmio);
+}
+
+int kvm_x86_ops_get_lpage_level(void)
+{
+	return svm_get_lpage_level();
+}
+
+bool kvm_x86_ops_rdtscp_supported(void)
+{
+	return svm_rdtscp_supported();
+}
+
+bool kvm_x86_ops_invpcid_supported(void)
+{
+	return svm_invpcid_supported();
+}
+
+void kvm_x86_ops_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	set_tdp_cr3(vcpu, cr3);
+}
+
+void kvm_x86_ops_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+	svm_set_supported_cpuid(func, entry);
+}
+
+bool kvm_x86_ops_has_wbinvd_exit(void)
+{
+	return svm_has_wbinvd_exit();
+}
+
+u64 kvm_x86_ops_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	return svm_read_l1_tsc_offset(vcpu);
+}
+
+u64 kvm_x86_ops_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+	return svm_write_l1_tsc_offset(vcpu, offset);
+}
+
+void kvm_x86_ops_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	svm_get_exit_info(vcpu, info1, info2);
+}
+
+int kvm_x86_ops_check_intercept(struct kvm_vcpu *vcpu,
+				struct x86_instruction_info *info,
+				enum x86_intercept_stage stage)
+{
+	return svm_check_intercept(vcpu, info, stage);
+}
+
+void kvm_x86_ops_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+{
+	svm_handle_exit_irqoff(vcpu);
+}
+
+bool kvm_x86_ops_mpx_supported(void)
+{
+	return svm_mpx_supported();
+}
+
+bool kvm_x86_ops_xsaves_supported(void)
+{
+	return svm_xsaves_supported();
+}
+
+bool kvm_x86_ops_umip_emulated(void)
+{
+	return svm_umip_emulated();
+}
+
+bool kvm_x86_ops_pt_supported(void)
+{
+	return svm_pt_supported();
+}
+
+int kvm_x86_ops_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+	return kvm_x86_ops->check_nested_events(vcpu, external_intr);
+}
+
+void kvm_x86_ops_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+	__kvm_request_immediate_exit(vcpu);
+}
+
+void kvm_x86_ops_sched_in(struct kvm_vcpu *kvm, int cpu)
+{
+	svm_sched_in(kvm, cpu);
+}
+
+void kvm_x86_ops_slot_enable_log_dirty(struct kvm *kvm,
+				       struct kvm_memory_slot *slot)
+{
+	kvm_x86_ops->slot_enable_log_dirty(kvm, slot);
+}
+
+void kvm_x86_ops_slot_disable_log_dirty(struct kvm *kvm,
+					struct kvm_memory_slot *slot)
+{
+	kvm_x86_ops->slot_disable_log_dirty(kvm, slot);
+}
+
+void kvm_x86_ops_flush_log_dirty(struct kvm *kvm)
+{
+	kvm_x86_ops->flush_log_dirty(kvm);
+}
+
+void kvm_x86_ops_enable_log_dirty_pt_masked(struct kvm *kvm,
+					    struct kvm_memory_slot *slot,
+					    gfn_t offset, unsigned long mask)
+{
+	kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, offset, mask);
+}
+
+int kvm_x86_ops_write_log_dirty(struct kvm_vcpu *vcpu)
+{
+	return kvm_x86_ops->write_log_dirty(vcpu);
+}
+
+int kvm_x86_ops_pre_block(struct kvm_vcpu *vcpu)
+{
+	return kvm_x86_ops->pre_block(vcpu);
+}
+
+void kvm_x86_ops_post_block(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->post_block(vcpu);
+}
+
+void kvm_x86_ops_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	svm_vcpu_blocking(vcpu);
+}
+
+void kvm_x86_ops_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	svm_vcpu_unblocking(vcpu);
+}
+
+int kvm_x86_ops_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+			       uint32_t guest_irq, bool set)
+{
+	return svm_update_pi_irte(kvm, host_irq, guest_irq, set);
+}
+
+void kvm_x86_ops_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+{
+	avic_post_state_restore(vcpu);
+}
+
+bool kvm_x86_ops_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return svm_dy_apicv_has_pending_interrupt(vcpu);
+}
+
+int kvm_x86_ops_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+			     bool *expired)
+{
+	return kvm_x86_ops->set_hv_timer(vcpu, guest_deadline_tsc, expired);
+}
+
+void kvm_x86_ops_cancel_hv_timer(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->cancel_hv_timer(vcpu);
+}
+
+void kvm_x86_ops_setup_mce(struct kvm_vcpu *vcpu)
+{
+	svm_setup_mce(vcpu);
+}
+
+int kvm_x86_ops_get_nested_state(struct kvm_vcpu *vcpu,
+				 struct kvm_nested_state __user *user_kvm_nested_state,
+				 unsigned user_data_size)
+{
+	return kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
+					     user_data_size);
+}
+
+int kvm_x86_ops_set_nested_state(struct kvm_vcpu *vcpu,
+				 struct kvm_nested_state __user *user_kvm_nested_state,
+				 struct kvm_nested_state *kvm_state)
+{
+	return kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state,
+					     kvm_state);
+}
+
+void kvm_x86_ops_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->get_vmcs12_pages(vcpu);
+}
+
+int kvm_x86_ops_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return svm_smi_allowed(vcpu);
+}
+
+int kvm_x86_ops_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	return svm_pre_enter_smm(vcpu, smstate);
+}
+
+int kvm_x86_ops_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+{
+	return svm_pre_leave_smm(vcpu, smstate);
+}
+
+int kvm_x86_ops_enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	return enable_smi_window(vcpu);
+}
+
+int kvm_x86_ops_mem_enc_op(struct kvm *kvm, void __user *argp)
+{
+	return svm_mem_enc_op(kvm, argp);
+}
+
+int kvm_x86_ops_mem_enc_reg_region(struct kvm *kvm,
+				   struct kvm_enc_region *argp)
+{
+	return svm_register_enc_region(kvm, argp);
+}
+
+int kvm_x86_ops_mem_enc_unreg_region(struct kvm *kvm,
+				     struct kvm_enc_region *argp)
+{
+	return svm_unregister_enc_region(kvm, argp);
+}
+
+int kvm_x86_ops_get_msr_feature(struct kvm_msr_entry *entry)
+{
+	return svm_get_msr_feature(entry);
+}
+
+int kvm_x86_ops_nested_enable_evmcs(struct kvm_vcpu *vcpu,
+				    uint16_t *vmcs_version)
+{
+	return nested_enable_evmcs(vcpu, vmcs_version);
+}
+
+uint16_t kvm_x86_ops_nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+{
+	return kvm_x86_ops->nested_get_evmcs_version(vcpu);
+}
+
+bool kvm_x86_ops_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+	return svm_need_emulation_on_page_fault(vcpu);
+}
+
+bool kvm_x86_ops_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	return svm_apic_init_signal_blocked(vcpu);
+}
diff --git a/arch/x86/kvm/vmx/vmx_ops.c b/arch/x86/kvm/vmx/vmx_ops.c
new file mode 100644
index 000000000000..cdcad73935d9
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmx_ops.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *  arch/x86/kvm/vmx/vmx_ops.c
+ *
+ *  Copyright 2019 Red Hat, Inc.
+ */
+
+__init int kvm_x86_ops_cpu_has_kvm_support(void)
+{
+	return cpu_has_kvm_support();
+}
+
+__init int kvm_x86_ops_disabled_by_bios(void)
+{
+	return vmx_disabled_by_bios();
+}
+
+int kvm_x86_ops_hardware_enable(void)
+{
+	return hardware_enable();
+}
+
+void kvm_x86_ops_hardware_disable(void)
+{
+	hardware_disable();
+}
+
+__init int kvm_x86_ops_check_processor_compatibility(void)
+{
+	return vmx_check_processor_compat();
+}
+
+__init int kvm_x86_ops_hardware_setup(void)
+{
+	return hardware_setup();
+}
+
+void kvm_x86_ops_hardware_unsetup(void)
+{
+	hardware_unsetup();
+}
+
+bool kvm_x86_ops_cpu_has_accelerated_tpr(void)
+{
+	return report_flexpriority();
+}
+
+bool kvm_x86_ops_has_emulated_msr(int index)
+{
+	return vmx_has_emulated_msr(index);
+}
+
+void kvm_x86_ops_cpuid_update(struct kvm_vcpu *vcpu)
+{
+	vmx_cpuid_update(vcpu);
+}
+
+struct kvm *kvm_x86_ops_vm_alloc(void)
+{
+	return vmx_vm_alloc();
+}
+
+void kvm_x86_ops_vm_free(struct kvm *kvm)
+{
+	vmx_vm_free(kvm);
+}
+
+int kvm_x86_ops_vm_init(struct kvm *kvm)
+{
+	return vmx_vm_init(kvm);
+}
+
+void kvm_x86_ops_vm_destroy(struct kvm *kvm)
+{
+	kvm_x86_ops->vm_destroy(kvm);
+}
+
+struct kvm_vcpu *kvm_x86_ops_vcpu_create(struct kvm *kvm, unsigned id)
+{
+	return vmx_create_vcpu(kvm, id);
+}
+
+void kvm_x86_ops_vcpu_free(struct kvm_vcpu *vcpu)
+{
+	vmx_free_vcpu(vcpu);
+}
+
+void kvm_x86_ops_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+	vmx_vcpu_reset(vcpu, init_event);
+}
+
+void kvm_x86_ops_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+	vmx_prepare_switch_to_guest(vcpu);
+}
+
+void kvm_x86_ops_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+	vmx_vcpu_load(vcpu, cpu);
+}
+
+void kvm_x86_ops_vcpu_put(struct kvm_vcpu *vcpu)
+{
+	vmx_vcpu_put(vcpu);
+}
+
+void kvm_x86_ops_update_bp_intercept(struct kvm_vcpu *vcpu)
+{
+	update_exception_bitmap(vcpu);
+}
+
+int kvm_x86_ops_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	return vmx_get_msr(vcpu, msr);
+}
+
+int kvm_x86_ops_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	return vmx_set_msr(vcpu, msr);
+}
+
+u64 kvm_x86_ops_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+	return vmx_get_segment_base(vcpu, seg);
+}
+
+void kvm_x86_ops_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+			     int seg)
+{
+	vmx_get_segment(vcpu, var, seg);
+}
+
+int kvm_x86_ops_get_cpl(struct kvm_vcpu *vcpu)
+{
+	return vmx_get_cpl(vcpu);
+}
+
+void kvm_x86_ops_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var,
+			     int seg)
+{
+	vmx_set_segment(vcpu, var, seg);
+}
+
+void kvm_x86_ops_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+	vmx_get_cs_db_l_bits(vcpu, db, l);
+}
+
+void kvm_x86_ops_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+	vmx_decache_cr0_guest_bits(vcpu);
+}
+
+void kvm_x86_ops_decache_cr3(struct kvm_vcpu *vcpu)
+{
+	vmx_decache_cr3(vcpu);
+}
+
+void kvm_x86_ops_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+	vmx_decache_cr4_guest_bits(vcpu);
+}
+
+void kvm_x86_ops_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+	vmx_set_cr0(vcpu, cr0);
+}
+
+void kvm_x86_ops_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	vmx_set_cr3(vcpu, cr3);
+}
+
+int kvm_x86_ops_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+	return vmx_set_cr4(vcpu, cr4);
+}
+
+void kvm_x86_ops_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	vmx_set_efer(vcpu, efer);
+}
+
+void kvm_x86_ops_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	vmx_get_idt(vcpu, dt);
+}
+
+void kvm_x86_ops_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	vmx_set_idt(vcpu, dt);
+}
+
+void kvm_x86_ops_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	vmx_get_gdt(vcpu, dt);
+}
+
+void kvm_x86_ops_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+	vmx_set_gdt(vcpu, dt);
+}
+
+u64 kvm_x86_ops_get_dr6(struct kvm_vcpu *vcpu)
+{
+	return vmx_get_dr6(vcpu);
+}
+
+void kvm_x86_ops_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
+{
+	vmx_set_dr6(vcpu, value);
+}
+
+void kvm_x86_ops_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+	vmx_sync_dirty_debug_regs(vcpu);
+}
+
+void kvm_x86_ops_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
+{
+	vmx_set_dr7(vcpu, value);
+}
+
+void kvm_x86_ops_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+	vmx_cache_reg(vcpu, reg);
+}
+
+unsigned long kvm_x86_ops_get_rflags(struct kvm_vcpu *vcpu)
+{
+	return vmx_get_rflags(vcpu);
+}
+
+void kvm_x86_ops_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+	vmx_set_rflags(vcpu, rflags);
+}
+
+void kvm_x86_ops_tlb_flush(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+	vmx_flush_tlb(vcpu, invalidate_gpa);
+}
+
+int kvm_x86_ops_tlb_remote_flush(struct kvm *kvm)
+{
+	return kvm_x86_ops->tlb_remote_flush(kvm);
+}
+
+int kvm_x86_ops_tlb_remote_flush_with_range(struct kvm *kvm,
+					    struct kvm_tlb_range *range)
+{
+	return kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+}
+
+void kvm_x86_ops_tlb_flush_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	vmx_flush_tlb_gva(vcpu, addr);
+}
+
+void kvm_x86_ops_run(struct kvm_vcpu *vcpu)
+{
+	vmx_vcpu_run(vcpu);
+}
+
+int kvm_x86_ops_handle_exit(struct kvm_vcpu *vcpu)
+{
+	return vmx_handle_exit(vcpu);
+}
+
+int kvm_x86_ops_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+	return __skip_emulated_instruction(vcpu);
+}
+
+void kvm_x86_ops_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+{
+	vmx_set_interrupt_shadow(vcpu, mask);
+}
+
+u32 kvm_x86_ops_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+	return vmx_get_interrupt_shadow(vcpu);
+}
+
+void kvm_x86_ops_patch_hypercall(struct kvm_vcpu *vcpu,
+				 unsigned char *hypercall_addr)
+{
+	vmx_patch_hypercall(vcpu, hypercall_addr);
+}
+
+void kvm_x86_ops_set_irq(struct kvm_vcpu *vcpu)
+{
+	vmx_inject_irq(vcpu);
+}
+
+void kvm_x86_ops_set_nmi(struct kvm_vcpu *vcpu)
+{
+	vmx_inject_nmi(vcpu);
+}
+
+void kvm_x86_ops_queue_exception(struct kvm_vcpu *vcpu)
+{
+	vmx_queue_exception(vcpu);
+}
+
+void kvm_x86_ops_cancel_injection(struct kvm_vcpu *vcpu)
+{
+	vmx_cancel_injection(vcpu);
+}
+
+int kvm_x86_ops_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+	return vmx_interrupt_allowed(vcpu);
+}
+
+int kvm_x86_ops_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+	return vmx_nmi_allowed(vcpu);
+}
+
+bool kvm_x86_ops_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	return vmx_get_nmi_mask(vcpu);
+}
+
+void kvm_x86_ops_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+	vmx_set_nmi_mask(vcpu, masked);
+}
+
+void kvm_x86_ops_enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+	enable_nmi_window(vcpu);
+}
+
+void kvm_x86_ops_enable_irq_window(struct kvm_vcpu *vcpu)
+{
+	enable_irq_window(vcpu);
+}
+
+void kvm_x86_ops_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+	update_cr8_intercept(vcpu, tpr, irr);
+}
+
+bool kvm_x86_ops_get_enable_apicv(struct kvm_vcpu *vcpu)
+{
+	return vmx_get_enable_apicv(vcpu);
+}
+
+void kvm_x86_ops_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+	vmx_refresh_apicv_exec_ctrl(vcpu);
+}
+
+void kvm_x86_ops_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+	vmx_hwapic_irr_update(vcpu, max_irr);
+}
+
+void kvm_x86_ops_hwapic_isr_update(struct kvm_vcpu *vcpu, int isr)
+{
+	vmx_hwapic_isr_update(vcpu, isr);
+}
+
+bool kvm_x86_ops_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	return vmx_guest_apic_has_interrupt(vcpu);
+}
+
+void kvm_x86_ops_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	vmx_load_eoi_exitmap(vcpu, eoi_exit_bitmap);
+}
+
+void kvm_x86_ops_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+	vmx_set_virtual_apic_mode(vcpu);
+}
+
+void kvm_x86_ops_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+	vmx_set_apic_access_page_addr(vcpu, hpa);
+}
+
+void kvm_x86_ops_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+	vmx_deliver_posted_interrupt(vcpu, vector);
+}
+
+int kvm_x86_ops_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	return vmx_sync_pir_to_irr(vcpu);
+}
+
+int kvm_x86_ops_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+	return vmx_set_tss_addr(kvm, addr);
+}
+
+int kvm_x86_ops_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+	return vmx_set_identity_map_addr(kvm, ident_addr);
+}
+
+int kvm_x86_ops_get_tdp_level(struct kvm_vcpu *vcpu)
+{
+	return get_ept_level(vcpu);
+}
+
+u64 kvm_x86_ops_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	return vmx_get_mt_mask(vcpu, gfn, is_mmio);
+}
+
+int kvm_x86_ops_get_lpage_level(void)
+{
+	return vmx_get_lpage_level();
+}
+
+bool kvm_x86_ops_rdtscp_supported(void)
+{
+	return vmx_rdtscp_supported();
+}
+
+bool kvm_x86_ops_invpcid_supported(void)
+{
+	return vmx_invpcid_supported();
+}
+
+void kvm_x86_ops_set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+	vmx_set_cr3(vcpu, cr3);
+}
+
+void kvm_x86_ops_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+	vmx_set_supported_cpuid(func, entry);
+}
+
+bool kvm_x86_ops_has_wbinvd_exit(void)
+{
+	return cpu_has_vmx_wbinvd_exit();
+}
+
+u64 kvm_x86_ops_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	return vmx_read_l1_tsc_offset(vcpu);
+}
+
+u64 kvm_x86_ops_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+	return vmx_write_l1_tsc_offset(vcpu, offset);
+}
+
+void kvm_x86_ops_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	vmx_get_exit_info(vcpu, info1, info2);
+}
+
+int kvm_x86_ops_check_intercept(struct kvm_vcpu *vcpu,
+				struct x86_instruction_info *info,
+				enum x86_intercept_stage stage)
+{
+	return vmx_check_intercept(vcpu, info, stage);
+}
+
+void kvm_x86_ops_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+{
+	vmx_handle_exit_irqoff(vcpu);
+}
+
+bool kvm_x86_ops_mpx_supported(void)
+{
+	return vmx_mpx_supported();
+}
+
+bool kvm_x86_ops_xsaves_supported(void)
+{
+	return vmx_xsaves_supported();
+}
+
+bool kvm_x86_ops_umip_emulated(void)
+{
+	return vmx_umip_emulated();
+}
+
+bool kvm_x86_ops_pt_supported(void)
+{
+	return vmx_pt_supported();
+}
+
+int kvm_x86_ops_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+	return kvm_x86_ops->check_nested_events(vcpu, external_intr);
+}
+
+void kvm_x86_ops_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+	vmx_request_immediate_exit(vcpu);
+}
+
+void kvm_x86_ops_sched_in(struct kvm_vcpu *kvm, int cpu)
+{
+	vmx_sched_in(kvm, cpu);
+}
+
+void kvm_x86_ops_slot_enable_log_dirty(struct kvm *kvm,
+				       struct kvm_memory_slot *slot)
+{
+	vmx_slot_enable_log_dirty(kvm, slot);
+}
+
+void kvm_x86_ops_slot_disable_log_dirty(struct kvm *kvm,
+					struct kvm_memory_slot *slot)
+{
+	vmx_slot_disable_log_dirty(kvm, slot);
+}
+
+void kvm_x86_ops_flush_log_dirty(struct kvm *kvm)
+{
+	vmx_flush_log_dirty(kvm);
+}
+
+void kvm_x86_ops_enable_log_dirty_pt_masked(struct kvm *kvm,
+					    struct kvm_memory_slot *slot,
+					    gfn_t offset, unsigned long mask)
+{
+	vmx_enable_log_dirty_pt_masked(kvm, slot, offset, mask);
+}
+
+int kvm_x86_ops_write_log_dirty(struct kvm_vcpu *vcpu)
+{
+	return vmx_write_pml_buffer(vcpu);
+}
+
+int kvm_x86_ops_pre_block(struct kvm_vcpu *vcpu)
+{
+	return vmx_pre_block(vcpu);
+}
+
+void kvm_x86_ops_post_block(struct kvm_vcpu *vcpu)
+{
+	vmx_post_block(vcpu);
+}
+
+void kvm_x86_ops_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->vcpu_blocking(vcpu);
+}
+
+void kvm_x86_ops_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->vcpu_unblocking(vcpu);
+}
+
+int kvm_x86_ops_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+			       uint32_t guest_irq, bool set)
+{
+	return vmx_update_pi_irte(kvm, host_irq, guest_irq, set);
+}
+
+void kvm_x86_ops_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+{
+	vmx_apicv_post_state_restore(vcpu);
+}
+
+bool kvm_x86_ops_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+	return vmx_dy_apicv_has_pending_interrupt(vcpu);
+}
+
+int kvm_x86_ops_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+			     bool *expired)
+{
+	return vmx_set_hv_timer(vcpu, guest_deadline_tsc, expired);
+}
+
+void kvm_x86_ops_cancel_hv_timer(struct kvm_vcpu *vcpu)
+{
+	vmx_cancel_hv_timer(vcpu);
+}
+
+void kvm_x86_ops_setup_mce(struct kvm_vcpu *vcpu)
+{
+	vmx_setup_mce(vcpu);
+}
+
+int kvm_x86_ops_get_nested_state(struct kvm_vcpu *vcpu,
+				 struct kvm_nested_state __user *user_kvm_nested_state,
+				 unsigned user_data_size)
+{
+	return kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
+					     user_data_size);
+}
+
+int kvm_x86_ops_set_nested_state(struct kvm_vcpu *vcpu,
+				 struct kvm_nested_state __user *user_kvm_nested_state,
+				 struct kvm_nested_state *kvm_state)
+{
+	return kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state,
+					     kvm_state);
+}
+
+void kvm_x86_ops_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->get_vmcs12_pages(vcpu);
+}
+
+int kvm_x86_ops_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	return vmx_smi_allowed(vcpu);
+}
+
+int kvm_x86_ops_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	return vmx_pre_enter_smm(vcpu, smstate);
+}
+
+int kvm_x86_ops_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+{
+	return vmx_pre_leave_smm(vcpu, smstate);
+}
+
+int kvm_x86_ops_enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	return enable_smi_window(vcpu);
+}
+
+int kvm_x86_ops_mem_enc_op(struct kvm *kvm, void __user *argp)
+{
+	return kvm_x86_ops->mem_enc_op(kvm, argp);
+}
+
+int kvm_x86_ops_mem_enc_reg_region(struct kvm *kvm,
+				   struct kvm_enc_region *argp)
+{
+	return kvm_x86_ops->mem_enc_reg_region(kvm, argp);
+}
+
+int kvm_x86_ops_mem_enc_unreg_region(struct kvm *kvm,
+				     struct kvm_enc_region *argp)
+{
+	return kvm_x86_ops->mem_enc_unreg_region(kvm, argp);
+}
+
+int kvm_x86_ops_get_msr_feature(struct kvm_msr_entry *entry)
+{
+	return vmx_get_msr_feature(entry);
+}
+
+int kvm_x86_ops_nested_enable_evmcs(struct kvm_vcpu *vcpu,
+				    uint16_t *vmcs_version)
+{
+	return kvm_x86_ops->nested_enable_evmcs(vcpu, vmcs_version);
+}
+
+uint16_t kvm_x86_ops_nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+{
+	return kvm_x86_ops->nested_get_evmcs_version(vcpu);
+}
+
+bool kvm_x86_ops_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+{
+	return vmx_need_emulation_on_page_fault(vcpu);
+}
+
+bool kvm_x86_ops_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
+{
+	return vmx_apic_init_signal_blocked(vcpu);
+}
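
The svm_ops.c and vmx_ops.c wrappers above are intentionally mechanical: one thin forwarder per op and per vendor, with a handful still routed through the kvm_x86_ops pointer. As a hedged sketch of the pattern only, a further op would need one declaration in the ops header above plus one wrapper per vendor file; the op name frob_widget and the vendor helpers below are made up for illustration:

	/* In the ops header above: declare the external function once. */
	extern void kvm_x86_ops_frob_widget(struct kvm_vcpu *vcpu);

	/* arch/x86/kvm/svm_ops.c: forward to the (hypothetical) SVM helper. */
	void kvm_x86_ops_frob_widget(struct kvm_vcpu *vcpu)
	{
		svm_frob_widget(vcpu);
	}

	/* arch/x86/kvm/vmx/vmx_ops.c: forward to the (hypothetical) VMX helper. */
	void kvm_x86_ops_frob_widget(struct kvm_vcpu *vcpu)
	{
		vmx_frob_widget(vcpu);
	}

Because both files define the same symbol names, only one of them can be linked into any given kvm binary, so the duplicated definitions never conflict at link time.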