[08/22] KVM: nVMX: Cache shadow vmcs12 on VMEntry and flush to memory on VMExit

Message ID d911394c-dc18-8bfa-fee9-c10286f4216a@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini July 18, 2018, 5:57 p.m. UTC
On 18/07/2018 19:40, Paolo Bonzini wrote:
> On 23/06/2018 01:35, Liran Alon wrote:
>> @@ -12392,6 +12455,17 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
>>  			prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
>>  				       exit_qualification);
>>  
>> +		/*
>> +		 * Must happen outside of sync_vmcs12() as it will
>> +		 * also be used to capture vmcs12 cache as part of
>> +		 * capturing nVMX state for snapshot (migration).
>> +		 *
>> +		 * Otherwise, this flush will dirty guest memory at a
>> +		 * point it is already assumed by user-space to be
>> +		 * immutable.
>> +		 */
>> +		nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
>> +
>>  		if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
>>  					 vmcs12->vm_exit_msr_store_count))
>>  			nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
>>
> 
> So I assume this would be another page at the end of the nested
> state.  Do you already have a patch for that?
> 

Should be something like this:

The only question I have is: should shadow_vmcs12 be included if we're
not in guest mode?  Above I'm assuming no, because it should be flushed
to memory on vmexit (including SMM entry).

Thanks,

Paolo
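
A sketch of the resulting data layout, assuming the kvm_nested_state
format used above (offsets relative to user_kvm_nested_state->data;
this is just a summary of what the patch below writes, not new code):

/*
 * Nested state blob layout with the patch below applied:
 *
 *   data + 0            vmcs12         present iff current_vmptr != -1ull
 *   data + VMCS12_SIZE  shadow vmcs12  present iff in guest mode, vmcs12
 *                                      enables shadow VMCS and
 *                                      vmcs_link_pointer != -1ull
 *
 * kvm_state.size covers the header plus whichever of the two pages
 * are actually present.
 */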

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index efe3205feb48..41ca12cbd82b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -13152,9 +13152,15 @@  static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
 		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
 
-		if (vmx->nested.current_vmptr != -1ull)
+		if (vmx->nested.current_vmptr != -1ull) {
 			kvm_state.size += VMCS12_SIZE;
 
+			if (is_guest_mode(vcpu) &&
+			    nested_cpu_has_shadow_vmcs(vmcs12) &&
+			    vmcs12->vmcs_link_pointer != -1ull)
+				kvm_state.size += VMCS12_SIZE;
+		}
+
 		if (vmx->nested.smm.vmxon)
 			kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
 
@@ -13193,6 +13198,13 @@  static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
 		return -EFAULT;
 
+	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
+	    vmcs12->vmcs_link_pointer != -1ull) {
+		if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+				 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+			return -EFAULT;
+	}
+
 out:
 	return kvm_state.size;
 }
@@ -13244,6 +13256,23 @@  static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	    vmcs12->hdr.shadow_vmcs)
 		return -EINVAL;
 
+	if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) &&
+	    nested_cpu_has_shadow_vmcs(vmcs12) &&
+	    vmcs12->vmcs_link_pointer != -1ull) {
+		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
+		if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
+			return -EINVAL;
+
+		if (copy_from_user(shadow_vmcs12,
+				   user_kvm_nested_state->data + VMCS12_SIZE,
+				   sizeof(*vmcs12)))
+			return -EFAULT;
+
+		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+		    !shadow_vmcs12->hdr.shadow_vmcs)
+			return -EINVAL;
+	}
+
 	vmx_leave_nested(vcpu);
 	if (kvm_state->vmx.vmxon_pa == -1ull)
 		return 0;
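
For completeness, a minimal userspace sketch of fetching this state.
KVM_GET_NESTED_STATE and struct kvm_nested_state are the UAPI this
series adds; the 0x1000 page size and the helper itself are
assumptions for illustration, not part of the patch:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

/* Assumption: matches the kernel's VMCS12_SIZE (one 4K page). */
#define USER_VMCS12_SIZE 0x1000

/* Hypothetical helper: fetch nested state with room for vmcs12 plus
 * the optional shadow vmcs12 page added by this patch. */
static struct kvm_nested_state *get_nested_state(int vcpu_fd)
{
	size_t max = sizeof(struct kvm_nested_state) + 2 * USER_VMCS12_SIZE;
	struct kvm_nested_state *state = calloc(1, max);

	if (!state)
		return NULL;
	/* Tell the kernel how big the buffer is; on success it fills
	 * in the size actually used (header + vmcs12 [+ shadow vmcs12]). */
	state->size = max;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}
	return state;
}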