diff mbox series

[01/22] KVM: x86: Drop unnecessary and confusing KVM_X86_OP_NULL macro

Message ID 20220128005208.4008533-2-seanjc@google.com (mailing list archive)
State New, archived
Headers show
Series KVM: x86: Fill *_x86_ops via kvm-x86-ops.h | expand

Commit Message

Sean Christopherson Jan. 28, 2022, 12:51 a.m. UTC
Drop KVM_X86_OP_NULL, which is superfluous and confusing.  The macro is
just a "pass-through" to KVM_X86_OP; it was added with the intent of
actually using it in the future, but that obviously never happened.  The
name is confusing because its intended use was to provide a way for
vendor implementations to specify a NULL pointer, and even if it were
used, wouldn't necessarily be synonymous with declaring a kvm_x86_op as
DEFINE_STATIC_CALL_NULL.

Lastly, actually using KVM_X86_OP_NULL as intended isn't a maintainable
approach, e.g. bleeds vendor details into common x86 code, and would
either be prone to bit rot or would require modifying common x86 code
when modifying a vendor implementation.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 76 ++++++++++++++----------------
 arch/x86/include/asm/kvm_host.h    |  2 -
 arch/x86/kvm/x86.c                 |  1 -
 3 files changed, 35 insertions(+), 44 deletions(-)

Comments

Paolo Bonzini Jan. 28, 2022, 10:11 a.m. UTC | #1
On 1/28/22 01:51, Sean Christopherson wrote:
> Drop KVM_X86_OP_NULL, which is superfluous and confusing.  The macro is
> just a "pass-through" to KVM_X86_OP; it was added with the intent of
> actually using it in the future, but that obviously never happened.  The
> name is confusing because its intended use was to provide a way for
> vendor implementations to specify a NULL pointer, and even if it were
> used, wouldn't necessarily be synonymous with declaring a kvm_x86_op as
> DEFINE_STATIC_CALL_NULL.
> 
> Lastly, actually using KVM_X86_OP_NULL as intended isn't a maintanable
> approach, e.g. bleeds vendor details into common x86 code, and would
> either be prone to bit rot or would require modifying common x86 code
> when modifying a vendor implementation.

I have some patches that redefine KVM_X86_OP_NULL as "must be used with 
static_call_cond".  That's a more interesting definition, as it can be 
used to WARN if KVM_X86_OP is used with a NULL function pointer.

Paolo

> No functional change intended.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>   arch/x86/include/asm/kvm-x86-ops.h | 76 ++++++++++++++----------------
>   arch/x86/include/asm/kvm_host.h    |  2 -
>   arch/x86/kvm/x86.c                 |  1 -
>   3 files changed, 35 insertions(+), 44 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 631d5040b31e..e07151b2d1f6 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -1,25 +1,20 @@
>   /* SPDX-License-Identifier: GPL-2.0 */
> -#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL)
> +#ifndef KVM_X86_OP
>   BUILD_BUG_ON(1)
>   #endif
>   
>   /*
> - * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate
> - * "static_call()"s. They are also intended for use when defining
> - * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those
> - * functions that follow the [svm|vmx]_func_name convention.
> - * KVM_X86_OP_NULL() can leave a NULL definition for the
> - * case where there is no definition or a function name that
> - * doesn't match the typical naming convention is supplied.
> + * Invoke KVM_X86_OP() on all functions in struct kvm_x86_ops, e.g. to generate
> + * static_call declarations, definitions and updates.
>    */
> -KVM_X86_OP_NULL(hardware_enable)
> -KVM_X86_OP_NULL(hardware_disable)
> -KVM_X86_OP_NULL(hardware_unsetup)
> -KVM_X86_OP_NULL(cpu_has_accelerated_tpr)
> +KVM_X86_OP(hardware_enable)
> +KVM_X86_OP(hardware_disable)
> +KVM_X86_OP(hardware_unsetup)
> +KVM_X86_OP(cpu_has_accelerated_tpr)
>   KVM_X86_OP(has_emulated_msr)
>   KVM_X86_OP(vcpu_after_set_cpuid)
>   KVM_X86_OP(vm_init)
> -KVM_X86_OP_NULL(vm_destroy)
> +KVM_X86_OP(vm_destroy)
>   KVM_X86_OP(vcpu_create)
>   KVM_X86_OP(vcpu_free)
>   KVM_X86_OP(vcpu_reset)
> @@ -33,9 +28,9 @@ KVM_X86_OP(get_segment_base)
>   KVM_X86_OP(get_segment)
>   KVM_X86_OP(get_cpl)
>   KVM_X86_OP(set_segment)
> -KVM_X86_OP_NULL(get_cs_db_l_bits)
> +KVM_X86_OP(get_cs_db_l_bits)
>   KVM_X86_OP(set_cr0)
> -KVM_X86_OP_NULL(post_set_cr3)
> +KVM_X86_OP(post_set_cr3)
>   KVM_X86_OP(is_valid_cr4)
>   KVM_X86_OP(set_cr4)
>   KVM_X86_OP(set_efer)
> @@ -51,15 +46,15 @@ KVM_X86_OP(set_rflags)
>   KVM_X86_OP(get_if_flag)
>   KVM_X86_OP(tlb_flush_all)
>   KVM_X86_OP(tlb_flush_current)
> -KVM_X86_OP_NULL(tlb_remote_flush)
> -KVM_X86_OP_NULL(tlb_remote_flush_with_range)
> +KVM_X86_OP(tlb_remote_flush)
> +KVM_X86_OP(tlb_remote_flush_with_range)
>   KVM_X86_OP(tlb_flush_gva)
>   KVM_X86_OP(tlb_flush_guest)
>   KVM_X86_OP(vcpu_pre_run)
>   KVM_X86_OP(run)
> -KVM_X86_OP_NULL(handle_exit)
> -KVM_X86_OP_NULL(skip_emulated_instruction)
> -KVM_X86_OP_NULL(update_emulated_instruction)
> +KVM_X86_OP(handle_exit)
> +KVM_X86_OP(skip_emulated_instruction)
> +KVM_X86_OP(update_emulated_instruction)
>   KVM_X86_OP(set_interrupt_shadow)
>   KVM_X86_OP(get_interrupt_shadow)
>   KVM_X86_OP(patch_hypercall)
> @@ -78,17 +73,17 @@ KVM_X86_OP(check_apicv_inhibit_reasons)
>   KVM_X86_OP(refresh_apicv_exec_ctrl)
>   KVM_X86_OP(hwapic_irr_update)
>   KVM_X86_OP(hwapic_isr_update)
> -KVM_X86_OP_NULL(guest_apic_has_interrupt)
> +KVM_X86_OP(guest_apic_has_interrupt)
>   KVM_X86_OP(load_eoi_exitmap)
>   KVM_X86_OP(set_virtual_apic_mode)
> -KVM_X86_OP_NULL(set_apic_access_page_addr)
> +KVM_X86_OP(set_apic_access_page_addr)
>   KVM_X86_OP(deliver_posted_interrupt)
> -KVM_X86_OP_NULL(sync_pir_to_irr)
> +KVM_X86_OP(sync_pir_to_irr)
>   KVM_X86_OP(set_tss_addr)
>   KVM_X86_OP(set_identity_map_addr)
>   KVM_X86_OP(get_mt_mask)
>   KVM_X86_OP(load_mmu_pgd)
> -KVM_X86_OP_NULL(has_wbinvd_exit)
> +KVM_X86_OP(has_wbinvd_exit)
>   KVM_X86_OP(get_l2_tsc_offset)
>   KVM_X86_OP(get_l2_tsc_multiplier)
>   KVM_X86_OP(write_tsc_offset)
> @@ -96,32 +91,31 @@ KVM_X86_OP(write_tsc_multiplier)
>   KVM_X86_OP(get_exit_info)
>   KVM_X86_OP(check_intercept)
>   KVM_X86_OP(handle_exit_irqoff)
> -KVM_X86_OP_NULL(request_immediate_exit)
> +KVM_X86_OP(request_immediate_exit)
>   KVM_X86_OP(sched_in)
> -KVM_X86_OP_NULL(update_cpu_dirty_logging)
> -KVM_X86_OP_NULL(vcpu_blocking)
> -KVM_X86_OP_NULL(vcpu_unblocking)
> -KVM_X86_OP_NULL(update_pi_irte)
> -KVM_X86_OP_NULL(start_assignment)
> -KVM_X86_OP_NULL(apicv_post_state_restore)
> -KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
> -KVM_X86_OP_NULL(set_hv_timer)
> -KVM_X86_OP_NULL(cancel_hv_timer)
> +KVM_X86_OP(update_cpu_dirty_logging)
> +KVM_X86_OP(vcpu_blocking)
> +KVM_X86_OP(vcpu_unblocking)
> +KVM_X86_OP(update_pi_irte)
> +KVM_X86_OP(start_assignment)
> +KVM_X86_OP(apicv_post_state_restore)
> +KVM_X86_OP(dy_apicv_has_pending_interrupt)
> +KVM_X86_OP(set_hv_timer)
> +KVM_X86_OP(cancel_hv_timer)
>   KVM_X86_OP(setup_mce)
>   KVM_X86_OP(smi_allowed)
>   KVM_X86_OP(enter_smm)
>   KVM_X86_OP(leave_smm)
>   KVM_X86_OP(enable_smi_window)
> -KVM_X86_OP_NULL(mem_enc_op)
> -KVM_X86_OP_NULL(mem_enc_reg_region)
> -KVM_X86_OP_NULL(mem_enc_unreg_region)
> +KVM_X86_OP(mem_enc_op)
> +KVM_X86_OP(mem_enc_reg_region)
> +KVM_X86_OP(mem_enc_unreg_region)
>   KVM_X86_OP(get_msr_feature)
>   KVM_X86_OP(can_emulate_instruction)
>   KVM_X86_OP(apic_init_signal_blocked)
> -KVM_X86_OP_NULL(enable_direct_tlbflush)
> -KVM_X86_OP_NULL(migrate_timers)
> +KVM_X86_OP(enable_direct_tlbflush)
> +KVM_X86_OP(migrate_timers)
>   KVM_X86_OP(msr_filter_changed)
> -KVM_X86_OP_NULL(complete_emulated_msr)
> +KVM_X86_OP(complete_emulated_msr)
>   
>   #undef KVM_X86_OP
> -#undef KVM_X86_OP_NULL
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b2c3721b1c98..756806d2e801 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1538,14 +1538,12 @@ extern struct kvm_x86_ops kvm_x86_ops;
>   
>   #define KVM_X86_OP(func) \
>   	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
> -#define KVM_X86_OP_NULL KVM_X86_OP
>   #include <asm/kvm-x86-ops.h>
>   
>   static inline void kvm_ops_static_call_update(void)
>   {
>   #define KVM_X86_OP(func) \
>   	static_call_update(kvm_x86_##func, kvm_x86_ops.func);
> -#define KVM_X86_OP_NULL KVM_X86_OP
>   #include <asm/kvm-x86-ops.h>
>   }
>   
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 8033eca6f3a1..ebab514ec82a 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -129,7 +129,6 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
>   #define KVM_X86_OP(func)					     \
>   	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
>   				*(((struct kvm_x86_ops *)0)->func));
> -#define KVM_X86_OP_NULL KVM_X86_OP
>   #include <asm/kvm-x86-ops.h>
>   EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
>   EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
Sean Christopherson Jan. 28, 2022, 3:42 p.m. UTC | #2
On Fri, Jan 28, 2022, Paolo Bonzini wrote:
> On 1/28/22 01:51, Sean Christopherson wrote:
> > Drop KVM_X86_OP_NULL, which is superfluous and confusing.  The macro is
> > just a "pass-through" to KVM_X86_OP; it was added with the intent of
> > actually using it in the future, but that obviously never happened.  The
> > name is confusing because its intended use was to provide a way for
> > vendor implementations to specify a NULL pointer, and even if it were
> > used, wouldn't necessarily be synonymous with declaring a kvm_x86_op as
> > DEFINE_STATIC_CALL_NULL.
> > 
> > Lastly, actually using KVM_X86_OP_NULL as intended isn't a maintanable
> > approach, e.g. bleeds vendor details into common x86 code, and would
> > either be prone to bit rot or would require modifying common x86 code
> > when modifying a vendor implementation.
> 
> I have some patches that redefine KVM_X86_OP_NULL as "must be used with
> static_call_cond".  That's a more interesting definition, as it can be used
> to WARN if KVM_X86_OP is used with a NULL function pointer.

I'm skeptical that will actually work well and be maintainable.  E.g. sync_pir_to_irr()
must be explicitly checked for NULL in apic_has_interrupt_for_ppr(), forcing that path
to do static_call_cond() will be odd.  Ditto for ops that are wired up to ioctl()s,
e.g. the confidential VM stuff, and for ops that are guarded by other stuff, e.g. the
hypervisor timer.

Actually, it won't just be odd, it will be impossible to disallow a NULL pointer
for KVM_X86_OP and require static_call_cond() for KVM_X86_OP_NULL.  static_call_cond()
forces the return to "void", so any path that returns a value needs to be manually
guarded and can't use static_call_cond(), e.g.

arch/x86/kvm/x86.c: In function ‘kvm_arch_vm_ioctl’:
arch/x86/kvm/x86.c:6450:19: error: void value not ignored as it ought to be
 6450 |                 r = static_call_cond(kvm_x86_mem_enc_ioctl)(kvm, argp);
      |                   ^
Paolo Bonzini Jan. 31, 2022, 2:56 p.m. UTC | #3
On 1/28/22 16:42, Sean Christopherson wrote:
> On Fri, Jan 28, 2022, Paolo Bonzini wrote:
>> On 1/28/22 01:51, Sean Christopherson wrote:
>>> Drop KVM_X86_OP_NULL, which is superfluous and confusing.  The macro is
>>> just a "pass-through" to KVM_X86_OP; it was added with the intent of
>>> actually using it in the future, but that obviously never happened.  The
>>> name is confusing because its intended use was to provide a way for
>>> vendor implementations to specify a NULL pointer, and even if it were
>>> used, wouldn't necessarily be synonymous with declaring a kvm_x86_op as
>>> DEFINE_STATIC_CALL_NULL.
>>>
>>> Lastly, actually using KVM_X86_OP_NULL as intended isn't a maintanable
>>> approach, e.g. bleeds vendor details into common x86 code, and would
>>> either be prone to bit rot or would require modifying common x86 code
>>> when modifying a vendor implementation.
>>
>> I have some patches that redefine KVM_X86_OP_NULL as "must be used with
>> static_call_cond".  That's a more interesting definition, as it can be used
>> to WARN if KVM_X86_OP is used with a NULL function pointer.
> 
> I'm skeptical that will actually work well and be maintainble.  E.g. sync_pir_to_ir()
> must be explicitly check for NULL in apic_has_interrupt_for_ppr(), forcing that path
> to do static_call_cond() will be odd.  Ditto for ops that are wired up to ioctl()s,
> e.g. the confidential VM stuff, and for ops that are guarded by other stuff, e.g. the
> hypervisor timer.
> 
> Actually, it won't just be odd, it will be impossible to disallow NULL a pointer
> for KVM_X86_OP and require static_call_cond() for KVM_X86_OP_NULL.  static_call_cond()
> forces the return to "void", so any path that returns a value needs to be manually
> guarded and can't use static_call_cond(), e.g.

You're right and I should have looked up the series instead of going by 
memory.  What I did was mostly WARNing on KVM_X86_OP that sets NULL, as 
non-NULL ops are the common case.  I also added KVM_X86_OP_RET0 to 
remove some checks on kvm_x86_ops for ops that return a value.

All in all I totally agree with patches 2-11 and will apply them (patch 
2 to 5.17 even, as a prerequisite to fix the AVIC race).  Several of 
patches 13-21 are also mostly useful as it clarifies the code, and the 
others I guess are okay in the context of a coherent series though 
probably they would have been rejected as one-offs.  However, patches 12 
and 22 are unnecessary uses of the C preprocessor in my opinion.

Paolo
Maxim Levitsky Jan. 31, 2022, 3:19 p.m. UTC | #4
On Mon, 2022-01-31 at 15:56 +0100, Paolo Bonzini wrote:
> On 1/28/22 16:42, Sean Christopherson wrote:
> > On Fri, Jan 28, 2022, Paolo Bonzini wrote:
> > > On 1/28/22 01:51, Sean Christopherson wrote:
> > > > Drop KVM_X86_OP_NULL, which is superfluous and confusing.  The macro is
> > > > just a "pass-through" to KVM_X86_OP; it was added with the intent of
> > > > actually using it in the future, but that obviously never happened.  The
> > > > name is confusing because its intended use was to provide a way for
> > > > vendor implementations to specify a NULL pointer, and even if it were
> > > > used, wouldn't necessarily be synonymous with declaring a kvm_x86_op as
> > > > DEFINE_STATIC_CALL_NULL.
> > > > 
> > > > Lastly, actually using KVM_X86_OP_NULL as intended isn't a maintanable
> > > > approach, e.g. bleeds vendor details into common x86 code, and would
> > > > either be prone to bit rot or would require modifying common x86 code
> > > > when modifying a vendor implementation.
> > > 
> > > I have some patches that redefine KVM_X86_OP_NULL as "must be used with
> > > static_call_cond".  That's a more interesting definition, as it can be used
> > > to WARN if KVM_X86_OP is used with a NULL function pointer.
> > 
> > I'm skeptical that will actually work well and be maintainble.  E.g. sync_pir_to_ir()
> > must be explicitly check for NULL in apic_has_interrupt_for_ppr(), forcing that path
> > to do static_call_cond() will be odd.  Ditto for ops that are wired up to ioctl()s,
> > e.g. the confidential VM stuff, and for ops that are guarded by other stuff, e.g. the
> > hypervisor timer.
> > 
> > Actually, it won't just be odd, it will be impossible to disallow NULL a pointer
> > for KVM_X86_OP and require static_call_cond() for KVM_X86_OP_NULL.  static_call_cond()
> > forces the return to "void", so any path that returns a value needs to be manually
> > guarded and can't use static_call_cond(), e.g.
> 
> You're right and I should have looked up the series instead of going by 
> memory.  What I did was mostly WARNing on KVM_X86_OP that sets NULL, as 
> non-NULL ops are the common case.  I also added KVM_X86_OP_RET0 to 
> remove some checks on kvm_x86_ops for ops that return a value.
> 
> All in all I totally agree with patches 2-11 and will apply them (patch 
> 2 to 5.17 even, as a prerequisite to fix the AVIC race).  Several of 
> patches 13-21 are also mostly useful as it clarifies the code, and the 
> others I guess are okay in the context of a coherent series though 
> probably they would have been rejected as one-offs.  However, patches 12 
> and 22 are unnecessary uses of the C preprocessor in my opinion.
> 

I will send my patches very very soon - I'll rebase on top of this,
and review this patch series soon as well.

Best regards,
	Maxim Levitsky

> Paolo
>
Sean Christopherson Jan. 31, 2022, 4:48 p.m. UTC | #5
On Mon, Jan 31, 2022, Paolo Bonzini wrote:
> On 1/28/22 16:42, Sean Christopherson wrote:
> All in all I totally agree with patches 2-11 and will apply them (patch 2 to
> 5.17 even, as a prerequisite to fix the AVIC race).  Several of patches
> 13-21 are also mostly useful as it clarifies the code, and the others I
> guess are okay in the context of a coherent series though probably they
> would have been rejected as one-offs.

Yeah, the SEV changes in particular are a bit forced.  The only one I care deeply
about is mem_enc_op() => mem_enc_ioctl().  If the macro shenanigans are rejected,
I'd say drop patches 20 and 21, drop most of 19, and maybe give 18 (svm=>avic) the
boot as well.  I'd prefer to keep patch 17 (TLB tweak) to clarify the scope of
SVM's TLB flush.  Many of the changelogs would need to be tweaked as well, i.e. a
v2 is in order.

> However, patches 12 and 22 are unnecessary uses of the C preprocessor in my
> opinion.  

And 14 :-)

I don't have a super strong opinion.  I mostly worked on this because the idea
had been discussed multiple times in the past.  And because I wanted an excuse to
rename vmx_free_vcpu => vmx_vcpu_free, which for some reason I can never find :-)

I was/am concerned that the macro approach will make it more difficult to find a
vendor's implementation, though forcing a conforming name will mitigate that to
some degree.

The pros, in order of importance (IMO)

  1. Mostly forces vendor implementation name to match hook name
  2. Forces new hooks to get an entry in kvm-x86-ops.h
  3. Provides a bit of documentation for specialized hooks (APICv, etc...)
  4. Forces vendors to explicitly define something for non-conforming hooks
diff mbox series

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 631d5040b31e..e07151b2d1f6 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -1,25 +1,20 @@ 
 /* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(KVM_X86_OP) || !defined(KVM_X86_OP_NULL)
+#ifndef KVM_X86_OP
 BUILD_BUG_ON(1)
 #endif
 
 /*
- * KVM_X86_OP() and KVM_X86_OP_NULL() are used to help generate
- * "static_call()"s. They are also intended for use when defining
- * the vmx/svm kvm_x86_ops. KVM_X86_OP() can be used for those
- * functions that follow the [svm|vmx]_func_name convention.
- * KVM_X86_OP_NULL() can leave a NULL definition for the
- * case where there is no definition or a function name that
- * doesn't match the typical naming convention is supplied.
+ * Invoke KVM_X86_OP() on all functions in struct kvm_x86_ops, e.g. to generate
+ * static_call declarations, definitions and updates.
  */
-KVM_X86_OP_NULL(hardware_enable)
-KVM_X86_OP_NULL(hardware_disable)
-KVM_X86_OP_NULL(hardware_unsetup)
-KVM_X86_OP_NULL(cpu_has_accelerated_tpr)
+KVM_X86_OP(hardware_enable)
+KVM_X86_OP(hardware_disable)
+KVM_X86_OP(hardware_unsetup)
+KVM_X86_OP(cpu_has_accelerated_tpr)
 KVM_X86_OP(has_emulated_msr)
 KVM_X86_OP(vcpu_after_set_cpuid)
 KVM_X86_OP(vm_init)
-KVM_X86_OP_NULL(vm_destroy)
+KVM_X86_OP(vm_destroy)
 KVM_X86_OP(vcpu_create)
 KVM_X86_OP(vcpu_free)
 KVM_X86_OP(vcpu_reset)
@@ -33,9 +28,9 @@  KVM_X86_OP(get_segment_base)
 KVM_X86_OP(get_segment)
 KVM_X86_OP(get_cpl)
 KVM_X86_OP(set_segment)
-KVM_X86_OP_NULL(get_cs_db_l_bits)
+KVM_X86_OP(get_cs_db_l_bits)
 KVM_X86_OP(set_cr0)
-KVM_X86_OP_NULL(post_set_cr3)
+KVM_X86_OP(post_set_cr3)
 KVM_X86_OP(is_valid_cr4)
 KVM_X86_OP(set_cr4)
 KVM_X86_OP(set_efer)
@@ -51,15 +46,15 @@  KVM_X86_OP(set_rflags)
 KVM_X86_OP(get_if_flag)
 KVM_X86_OP(tlb_flush_all)
 KVM_X86_OP(tlb_flush_current)
-KVM_X86_OP_NULL(tlb_remote_flush)
-KVM_X86_OP_NULL(tlb_remote_flush_with_range)
+KVM_X86_OP(tlb_remote_flush)
+KVM_X86_OP(tlb_remote_flush_with_range)
 KVM_X86_OP(tlb_flush_gva)
 KVM_X86_OP(tlb_flush_guest)
 KVM_X86_OP(vcpu_pre_run)
 KVM_X86_OP(run)
-KVM_X86_OP_NULL(handle_exit)
-KVM_X86_OP_NULL(skip_emulated_instruction)
-KVM_X86_OP_NULL(update_emulated_instruction)
+KVM_X86_OP(handle_exit)
+KVM_X86_OP(skip_emulated_instruction)
+KVM_X86_OP(update_emulated_instruction)
 KVM_X86_OP(set_interrupt_shadow)
 KVM_X86_OP(get_interrupt_shadow)
 KVM_X86_OP(patch_hypercall)
@@ -78,17 +73,17 @@  KVM_X86_OP(check_apicv_inhibit_reasons)
 KVM_X86_OP(refresh_apicv_exec_ctrl)
 KVM_X86_OP(hwapic_irr_update)
 KVM_X86_OP(hwapic_isr_update)
-KVM_X86_OP_NULL(guest_apic_has_interrupt)
+KVM_X86_OP(guest_apic_has_interrupt)
 KVM_X86_OP(load_eoi_exitmap)
 KVM_X86_OP(set_virtual_apic_mode)
-KVM_X86_OP_NULL(set_apic_access_page_addr)
+KVM_X86_OP(set_apic_access_page_addr)
 KVM_X86_OP(deliver_posted_interrupt)
-KVM_X86_OP_NULL(sync_pir_to_irr)
+KVM_X86_OP(sync_pir_to_irr)
 KVM_X86_OP(set_tss_addr)
 KVM_X86_OP(set_identity_map_addr)
 KVM_X86_OP(get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
-KVM_X86_OP_NULL(has_wbinvd_exit)
+KVM_X86_OP(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
 KVM_X86_OP(get_l2_tsc_multiplier)
 KVM_X86_OP(write_tsc_offset)
@@ -96,32 +91,31 @@  KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
-KVM_X86_OP_NULL(request_immediate_exit)
+KVM_X86_OP(request_immediate_exit)
 KVM_X86_OP(sched_in)
-KVM_X86_OP_NULL(update_cpu_dirty_logging)
-KVM_X86_OP_NULL(vcpu_blocking)
-KVM_X86_OP_NULL(vcpu_unblocking)
-KVM_X86_OP_NULL(update_pi_irte)
-KVM_X86_OP_NULL(start_assignment)
-KVM_X86_OP_NULL(apicv_post_state_restore)
-KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
-KVM_X86_OP_NULL(set_hv_timer)
-KVM_X86_OP_NULL(cancel_hv_timer)
+KVM_X86_OP(update_cpu_dirty_logging)
+KVM_X86_OP(vcpu_blocking)
+KVM_X86_OP(vcpu_unblocking)
+KVM_X86_OP(update_pi_irte)
+KVM_X86_OP(start_assignment)
+KVM_X86_OP(apicv_post_state_restore)
+KVM_X86_OP(dy_apicv_has_pending_interrupt)
+KVM_X86_OP(set_hv_timer)
+KVM_X86_OP(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
 KVM_X86_OP(smi_allowed)
 KVM_X86_OP(enter_smm)
 KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
-KVM_X86_OP_NULL(mem_enc_op)
-KVM_X86_OP_NULL(mem_enc_reg_region)
-KVM_X86_OP_NULL(mem_enc_unreg_region)
+KVM_X86_OP(mem_enc_op)
+KVM_X86_OP(mem_enc_reg_region)
+KVM_X86_OP(mem_enc_unreg_region)
 KVM_X86_OP(get_msr_feature)
 KVM_X86_OP(can_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
-KVM_X86_OP_NULL(enable_direct_tlbflush)
-KVM_X86_OP_NULL(migrate_timers)
+KVM_X86_OP(enable_direct_tlbflush)
+KVM_X86_OP(migrate_timers)
 KVM_X86_OP(msr_filter_changed)
-KVM_X86_OP_NULL(complete_emulated_msr)
+KVM_X86_OP(complete_emulated_msr)
 
 #undef KVM_X86_OP
-#undef KVM_X86_OP_NULL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b2c3721b1c98..756806d2e801 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1538,14 +1538,12 @@  extern struct kvm_x86_ops kvm_x86_ops;
 
 #define KVM_X86_OP(func) \
 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
-#define KVM_X86_OP_NULL KVM_X86_OP
 #include <asm/kvm-x86-ops.h>
 
 static inline void kvm_ops_static_call_update(void)
 {
 #define KVM_X86_OP(func) \
 	static_call_update(kvm_x86_##func, kvm_x86_ops.func);
-#define KVM_X86_OP_NULL KVM_X86_OP
 #include <asm/kvm-x86-ops.h>
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8033eca6f3a1..ebab514ec82a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -129,7 +129,6 @@  EXPORT_SYMBOL_GPL(kvm_x86_ops);
 #define KVM_X86_OP(func)					     \
 	DEFINE_STATIC_CALL_NULL(kvm_x86_##func,			     \
 				*(((struct kvm_x86_ops *)0)->func));
-#define KVM_X86_OP_NULL KVM_X86_OP
 #include <asm/kvm-x86-ops.h>
 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);