[v2] kvm: nVMX: VMCLEAR an active shadow VMCS after last use

Message ID 1477668579-22555-1-git-send-email-jmattson@google.com
State New, archived

Commit Message

Jim Mattson Oct. 28, 2016, 3:29 p.m. UTC
After a successful VM-entry with the "VMCS shadowing" VM-execution
control set, the shadow VMCS referenced by the VMCS link pointer field
in the current VMCS becomes active on the logical processor.

A VMCS that is made active on more than one logical processor may become
corrupted. Therefore, before an active VMCS can be migrated to another
logical processor, the first logical processor must execute a VMCLEAR
for the active VMCS. VMCLEAR both ensures that all VMCS data are written
to memory and makes the VMCS inactive.

Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-By: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/vmx.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
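
For context, the sketch below (not part of the patch) shows, in simplified
form, the cross-CPU clear path in arch/x86/kvm/vmx.c that this change hooks
into: when a vCPU migrates, KVM sends an IPI to the CPU where its VMCS is
still active and VMCLEARs it there. The helper names match vmx.c of this era,
but the bodies are trimmed (the real __loaded_vmcs_clear() also maintains
per-CPU state); with this patch, loaded_vmcs_init() additionally VMCLEARs the
shadow VMCS, so it too is flushed to memory and made inactive before the move.

/*
 * Simplified sketch, not the verbatim kernel code: VMCLEAR a VMCS on
 * the CPU where it was last active before it can be loaded elsewhere.
 */
static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;

	/* Runs via IPI on loaded_vmcs->cpu. */
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
	loaded_vmcs_init(loaded_vmcs);	/* VMCLEARs vmcs, and now shadow_vmcs */
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
					 __loaded_vmcs_clear, loaded_vmcs, 1);
}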

Comments

Paolo Bonzini Nov. 2, 2016, 5:23 p.m. UTC | #1
On 28/10/2016 17:29, Jim Mattson wrote:
> After a successful VM-entry with the "VMCS shadowing" VM-execution
> control set, the shadow VMCS referenced by the VMCS link pointer field
> in the current VMCS becomes active on the logical processor.
> 
> A VMCS that is made active on more than one logical processor may become
> corrupted. Therefore, before an active VMCS can be migrated to another
> logical processor, the first logical processor must execute a VMCLEAR
> for the active VMCS. VMCLEAR both ensures that all VMCS data are written
> to memory and makes the VMCS inactive.
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
> Reviewed-By: David Matlack <dmatlack@google.com>

The patch looks good but---sorry for the possibly stupid
question---where was v1?

Paolo

> [...]
Jim Mattson Nov. 2, 2016, 5:56 p.m. UTC | #2
Sorry; I changed the title after reworking the patch. V1 was "[PATCH]
kvm: nVMX: Track active shadow VMCSs" (15 July 2016).

On Wed, Nov 2, 2016 at 10:23 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 28/10/2016 17:29, Jim Mattson wrote:
>> After a successful VM-entry with the "VMCS shadowing" VM-execution
>> control set, the shadow VMCS referenced by the VMCS link pointer field
>> in the current VMCS becomes active on the logical processor.
>>
>> A VMCS that is made active on more than one logical processor may become
>> corrupted. Therefore, before an active VMCS can be migrated to another
>> logical processor, the first logical processor must execute a VMCLEAR
>> for the active VMCS. VMCLEAR both ensures that all VMCS data are written
>> to memory and makes the VMCS inactive.
>>
>> Signed-off-by: Jim Mattson <jmattson@google.com>
>> Reviewed-By: David Matlack <dmatlack@google.com>
>
> The patch looks good but---sorry for the possibly stupid
> question---where was v1?
>
> Paolo
>
>> [...]
Paolo Bonzini Nov. 2, 2016, 6:04 p.m. UTC | #3
On 02/11/2016 18:56, Jim Mattson wrote:
> Sorry; I changed the title after reworking the patch. V1 was "[PATCH]
> kvm: nVMX: Track active shadow VMCSs" (15 July 2016).

Aha, now it makes sense.  I'm queuing it for -rc4.

Thanks,

Paolo

> [...]

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cf0b7be..046b03f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -187,6 +187,7 @@ struct vmcs {
  */
 struct loaded_vmcs {
 	struct vmcs *vmcs;
+	struct vmcs *shadow_vmcs;
 	int cpu;
 	int launched;
 	struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
 	 * memory during VMXOFF, VMCLEAR, VMPTRLD.
 	 */
 	struct vmcs12 *cached_vmcs12;
-	struct vmcs *current_shadow_vmcs;
 	/*
 	 * Indicates if the shadow vmcs must be updated with the
 	 * data hold by vmcs12
@@ -1419,6 +1419,8 @@ static void vmcs_clear(struct vmcs *vmcs)
 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
 	vmcs_clear(loaded_vmcs->vmcs);
+	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+		vmcs_clear(loaded_vmcs->shadow_vmcs);
 	loaded_vmcs->cpu = -1;
 	loaded_vmcs->launched = 0;
 }
@@ -3562,6 +3564,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	loaded_vmcs_clear(loaded_vmcs);
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
+	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
 }
 
 static void free_kvm_area(void)
@@ -6696,6 +6699,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
 	if (!item)
 		return NULL;
 	item->vmcs02.vmcs = alloc_vmcs();
+	item->vmcs02.shadow_vmcs = NULL;
 	if (!item->vmcs02.vmcs) {
 		kfree(item);
 		return NULL;
@@ -7072,7 +7076,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
 		vmcs_clear(shadow_vmcs);
-		vmx->nested.current_shadow_vmcs = shadow_vmcs;
+		vmx->vmcs01.shadow_vmcs = shadow_vmcs;
 	}
 
 	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7174,8 +7178,11 @@ static void free_nested(struct vcpu_vmx *vmx)
 		free_page((unsigned long)vmx->nested.msr_bitmap);
 		vmx->nested.msr_bitmap = NULL;
 	}
-	if (enable_shadow_vmcs)
-		free_vmcs(vmx->nested.current_shadow_vmcs);
+	if (enable_shadow_vmcs) {
+		vmcs_clear(vmx->vmcs01.shadow_vmcs);
+		free_vmcs(vmx->vmcs01.shadow_vmcs);
+		vmx->vmcs01.shadow_vmcs = NULL;
+	}
 	kfree(vmx->nested.cached_vmcs12);
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
@@ -7352,7 +7359,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	int i;
 	unsigned long field;
 	u64 field_value;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
@@ -7401,7 +7408,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	int i, q;
 	unsigned long field;
 	u64 field_value = 0;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 
 	vmcs_load(shadow_vmcs);
 
@@ -7591,7 +7598,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_SHADOW_VMCS);
 			vmcs_write64(VMCS_LINK_POINTER,
-				     __pa(vmx->nested.current_shadow_vmcs));
+				     __pa(vmx->vmcs01.shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
 		}
 	}
@@ -9156,6 +9163,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
+	vmx->loaded_vmcs->shadow_vmcs = NULL;
 	if (!vmx->loaded_vmcs->vmcs)
 		goto free_msrs;
 	if (!vmm_exclusive)
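
A closing note on the lifecycle the patch establishes. The sketch below is an
assumption about the teardown order (free_nested() running before
free_loaded_vmcs() during vCPU destruction, as in vmx_free_vcpu() of this
era), not code from the patch:

	free_nested(vmx);		/* VMCLEARs and frees vmx->vmcs01.shadow_vmcs,
					 * then NULLs the pointer */
	free_loaded_vmcs(vmx->loaded_vmcs);	/* the new WARN_ON() verifies no
						 * shadow VMCS was left dangling */

Note also the guard in loaded_vmcs_init(): the shadow VMCS is VMCLEARed only
when loaded_vmcs->launched is set, because a shadow VMCS becomes active on
the logical processor only after a successful VM-entry; an inactive shadow
VMCS has nothing to flush.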