
[v18,007/121] KVM: VMX: Reorder vmx initialization with kvm vendor initialization

Message ID 411a0b38c1a6f420a88b51cabf16ee871d6ca80d.1705965634.git.isaku.yamahata@intel.com (mailing list archive)
State New, archived
Series: KVM TDX basic feature support

Commit Message

Isaku Yamahata Jan. 22, 2024, 11:52 p.m. UTC
From: Isaku Yamahata <isaku.yamahata@intel.com>

To match vmx_exit cleanup.  Now that vmx_init() runs before kvm_x86_vendor_init(),
vmx_init() can initialize loaded_vmcss_on_cpu.  Opportunistically move it
back into vmx_init().

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
---
v18:
- move the loaded_vmcss_on_cpu initialization to vmx_init().
- fix error path of vt_init(). by Chao and Binbin
---
 arch/x86/kvm/vmx/main.c    | 17 +++++++----------
 arch/x86/kvm/vmx/vmx.c     |  6 ++++--
 arch/x86/kvm/vmx/x86_ops.h |  2 --
 3 files changed, 11 insertions(+), 14 deletions(-)
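
For reference, the resulting shape of vt_init() is the classic goto-unwind
pattern: each init step that succeeds gains a matching teardown on the error
path, executed in reverse order, which is what keeps the init and exit paths
symmetric. A self-contained sketch of the pattern (illustrative step names,
not the actual kernel functions):

#include <stdio.h>

/* Each *_init_step() stands in for one of the real calls: module for
 * vmx_init(), vendor for kvm_x86_vendor_init(), common for kvm_init(). */
static int  module_init_step(void) { puts("module init"); return 0; }
static void module_exit_step(void) { puts("module exit"); }
static int  vendor_init_step(void) { puts("vendor init"); return -1; /* simulate failure */ }
static void vendor_exit_step(void) { puts("vendor exit"); }
static int  common_init_step(void) { puts("common init"); return 0; }

static int setup(void)
{
	int r;

	r = module_init_step();
	if (r)
		return r;

	r = vendor_init_step();
	if (r)
		goto err_vendor_init;

	r = common_init_step();		/* must come last, like kvm_init() */
	if (r)
		goto err_common_init;

	return 0;

err_common_init:
	vendor_exit_step();
err_vendor_init:
	module_exit_step();		/* unwind in reverse order */
	return r;
}

int main(void)
{
	return setup() ? 1 : 0;
}

Running this prints "module init", "vendor init", "module exit": the failed
vendor step triggers teardown of exactly the steps that had completed.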

Comments

Binbin Wu Jan. 23, 2024, 3:42 a.m. UTC | #1
On 1/23/2024 7:52 AM, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> To match vmx_exit cleanup.
Do you mean vt_exit()?
Shouldn't vt_init() and vt_exit() be symmetric right from the beginning in
the refactor patch (006/121)?

Also, since the reordering of kvm_x86_vendor_init() and vmx_init() is going
to happen anyway, can we skip moving the initialization of
loaded_vmcss_on_cpu around?


Yuan Yao Jan. 29, 2024, 8:56 a.m. UTC | #2
On Mon, Jan 22, 2024 at 03:52:43PM -0800, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> To match vmx_exit cleanup.  Now that vmx_init() runs before kvm_x86_vendor_init(),
> vmx_init() can initialize loaded_vmcss_on_cpu.  Opportunistically move it
> back into vmx_init().
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
> v18:
> - move the loaded_vmcss_on_cpu initialization to vmx_init().
> - fix error path of vt_init(). by Chao and Binbin
> ---
>  arch/x86/kvm/vmx/main.c    | 17 +++++++----------
>  arch/x86/kvm/vmx/vmx.c     |  6 ++++--
>  arch/x86/kvm/vmx/x86_ops.h |  2 --
>  3 files changed, 11 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 18cecf12c7c8..443db8ec5cd5 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -171,7 +171,7 @@ struct kvm_x86_init_ops vt_init_ops __initdata = {
>  static int __init vt_init(void)
>  {
>  	unsigned int vcpu_size, vcpu_align;
> -	int cpu, r;
> +	int r;
>
>  	if (!kvm_is_vmx_supported())
>  		return -EOPNOTSUPP;
> @@ -182,18 +182,14 @@ static int __init vt_init(void)
>  	 */
>  	hv_init_evmcs();
>
> -	/* vmx_hardware_disable() accesses loaded_vmcss_on_cpu. */
> -	for_each_possible_cpu(cpu)
> -		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
> -
> -	r = kvm_x86_vendor_init(&vt_init_ops);
> -	if (r)
> -		return r;
> -
>  	r = vmx_init();
>  	if (r)
>  		goto err_vmx_init;
>
> +	r = kvm_x86_vendor_init(&vt_init_ops);

Doing kvm_x86_vendor_init() *after* vmx_init() leads to "enable_ept" being
used before it is set to 0 in some cases.

vmx_init() depends on the "enable_ept" variable in two places:
    vmx_setup_l1d_flush()
    allow_smaller_maxphyaddr = true;

And "enable_ept" can be set to 0 in:
kvm_x86_vendor_init()
    vmx_hardware_setup()
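
To make the hazard concrete, here is a self-contained sketch (illustrative
names; in the real code the readers are vmx_setup_l1d_flush() and the
allow_smaller_maxphyaddr assignment in vmx_init(), and the writer is
vmx_hardware_setup()):

#include <stdbool.h>
#include <stdio.h>

static bool enable_ept = true;		/* module param default */
static bool allow_smaller_maxphyaddr;

static void module_setup(void)		/* stands in for vmx_init() */
{
	/* Reads enable_ept ... */
	if (!enable_ept)
		allow_smaller_maxphyaddr = true;
}

static void vendor_setup(void)		/* stands in for vmx_hardware_setup() */
{
	bool cpu_has_ept = false;	/* pretend the hardware lacks EPT */

	/* ... but enable_ept is only finalized here. */
	if (!cpu_has_ept)
		enable_ept = false;
}

int main(void)
{
	module_setup();			/* runs first after the reorder */
	vendor_setup();
	/* Prints 0: the flag was read before vendor_setup() cleared it. */
	printf("allow_smaller_maxphyaddr = %d\n", allow_smaller_maxphyaddr);
	return 0;
}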

Xiaoyao Li Feb. 1, 2024, 1:47 a.m. UTC | #3
On 1/23/2024 7:52 AM, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
> 
> To match vmx_exit cleanup.  Now that vmx_init() runs before kvm_x86_vendor_init(),
> vmx_init() can initialize loaded_vmcss_on_cpu.  Opportunistically move it
> back into vmx_init().

This is, in part, a revert of Patch 05. I still don't see why we need
Patch 05 in the first place: why not move this patch before Patch 06?
Then we could drop Patch 05, and with it the revert portion of this patch.

Xiaoyao Li Feb. 1, 2024, 9:34 a.m. UTC | #4
On 1/23/2024 7:52 AM, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
> 
> To match vmx_exit cleanup.  Now that vmx_init() runs before kvm_x86_vendor_init(),
> vmx_init() can initialize loaded_vmcss_on_cpu.  Opportunistically move it
> back into vmx_init().
[...]
> diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> index 18cecf12c7c8..443db8ec5cd5 100644
> --- a/arch/x86/kvm/vmx/main.c
> +++ b/arch/x86/kvm/vmx/main.c
> @@ -171,7 +171,7 @@ struct kvm_x86_init_ops vt_init_ops __initdata = {
>   static int __init vt_init(void)
>   {
>   	unsigned int vcpu_size, vcpu_align;
> -	int cpu, r;
> +	int r;
>   
>   	if (!kvm_is_vmx_supported())
>   		return -EOPNOTSUPP;
> @@ -182,18 +182,14 @@ static int __init vt_init(void)
>   	 */
>   	hv_init_evmcs();
>   
> -	/* vmx_hardware_disable() accesses loaded_vmcss_on_cpu. */
> -	for_each_possible_cpu(cpu)
> -		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
> -
> -	r = kvm_x86_vendor_init(&vt_init_ops);
> -	if (r)
> -		return r;
> -
>   	r = vmx_init();
>   	if (r)
>   		goto err_vmx_init;
>   
> +	r = kvm_x86_vendor_init(&vt_init_ops);
> +	if (r)
> +		goto err_vendor_init;
> +

We cannot simply change the calling order of vmx_init() and
kvm_x86_vendor_init(); there is a dependency between them.

e.g.,

kvm_x86_vendor_init()
   -> ops->hardware_setup()
	-> vmx_hardware_setup()

will update 'enable_ept' based on hardware capability (e.g., whether the
hardware supports EPT), while 'enable_ept' is already used in vmx_init().
Isaku Yamahata Feb. 26, 2024, 6:48 p.m. UTC | #5
On Thu, Feb 01, 2024 at 05:34:44PM +0800,
Xiaoyao Li <xiaoyao.li@intel.com> wrote:

> > diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
> > index 18cecf12c7c8..443db8ec5cd5 100644
> > --- a/arch/x86/kvm/vmx/main.c
> > +++ b/arch/x86/kvm/vmx/main.c
> > @@ -171,7 +171,7 @@ struct kvm_x86_init_ops vt_init_ops __initdata = {
> >   static int __init vt_init(void)
> >   {
> >   	unsigned int vcpu_size, vcpu_align;
> > -	int cpu, r;
> > +	int r;
> >   	if (!kvm_is_vmx_supported())
> >   		return -EOPNOTSUPP;
> > @@ -182,18 +182,14 @@ static int __init vt_init(void)
> >   	 */
> >   	hv_init_evmcs();
> > -	/* vmx_hardware_disable() accesses loaded_vmcss_on_cpu. */
> > -	for_each_possible_cpu(cpu)
> > -		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
> > -
> > -	r = kvm_x86_vendor_init(&vt_init_ops);
> > -	if (r)
> > -		return r;
> > -
> >   	r = vmx_init();
> >   	if (r)
> >   		goto err_vmx_init;
> > +	r = kvm_x86_vendor_init(&vt_init_ops);
> > +	if (r)
> > +		goto err_vendor_init;
> > +
> 
> We cannot simply change the calling order of vmx_init() and
> kvm_x86_vendor_init(); there is a dependency between them.
> 
> e.g.,
> 
> kvm_x86_vendor_init()
>   -> ops->hardware_setup()
> 	-> vmx_hardware_setup()
> 
> will update 'enable_ept' based on hardware capability (e.g., whether the
> hardware supports EPT), while 'enable_ept' is already used in vmx_init().

I gave up on this cleanup; I will drop this patch in v19.

Patch

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 18cecf12c7c8..443db8ec5cd5 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -171,7 +171,7 @@  struct kvm_x86_init_ops vt_init_ops __initdata = {
 static int __init vt_init(void)
 {
 	unsigned int vcpu_size, vcpu_align;
-	int cpu, r;
+	int r;
 
 	if (!kvm_is_vmx_supported())
 		return -EOPNOTSUPP;
@@ -182,18 +182,14 @@  static int __init vt_init(void)
 	 */
 	hv_init_evmcs();
 
-	/* vmx_hardware_disable() accesses loaded_vmcss_on_cpu. */
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
-
-	r = kvm_x86_vendor_init(&vt_init_ops);
-	if (r)
-		return r;
-
 	r = vmx_init();
 	if (r)
 		goto err_vmx_init;
 
+	r = kvm_x86_vendor_init(&vt_init_ops);
+	if (r)
+		goto err_vendor_init;
+
 	/*
 	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
 	 * exposed to userspace!
@@ -207,9 +203,10 @@  static int __init vt_init(void)
 	return 0;
 
 err_kvm_init:
+	kvm_x86_vendor_exit();
+err_vendor_init:
 	vmx_exit();
 err_vmx_init:
-	kvm_x86_vendor_exit();
 	return r;
 }
 module_init(vt_init);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8efb956591d5..3f4dad3acb13 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -477,7 +477,7 @@  DEFINE_PER_CPU(struct vmcs *, current_vmcs);
  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
  */
-DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
+static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -8528,8 +8528,10 @@  int __init vmx_init(void)
 	if (r)
 		return r;
 
-	for_each_possible_cpu(cpu)
+	for_each_possible_cpu(cpu) {
+		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
 		pi_init_cpu(cpu);
+	}
 
 	cpu_emergency_register_virt_callback(vmx_emergency_disable);
 
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index b936388853ab..bca2d27b3dfd 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -14,8 +14,6 @@  static inline __init void hv_init_evmcs(void) {}
 static inline void hv_reset_evmcs(void) {}
 #endif /* IS_ENABLED(CONFIG_HYPERV) */
 
-DECLARE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
-
 bool kvm_is_vmx_supported(void);
 int __init vmx_init(void);
 void vmx_exit(void);