diff mbox

[8/8] kvm: nVMX: Defer gpa->hpa lookups for set_vmx_state

Message ID 1480536229-11754-9-git-send-email-jmattson@google.com (mailing list archive)
State New, archived
Headers show

Commit Message

Jim Mattson Nov. 30, 2016, 8:03 p.m. UTC
prepare_vmcs02 needs to be able to translate some guest physical
addresses to host physical addresses, but this isn't possible until
the vcpu is running.

When entering VMX non-root operation from set_vmx_state, queue a request
to perform the gpa->hpa lookups at the next vcpu_run.

Signed-off-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/vmx.c              | 21 ++++++++++++++++-----
 arch/x86/kvm/x86.c              |  2 ++
 3 files changed, 20 insertions(+), 5 deletions(-)

Comments

Paolo Bonzini Dec. 9, 2016, 3:35 p.m. UTC | #1
On 30/11/2016 21:03, Jim Mattson wrote:
> Prepare_vmcs02 needs to be able to translate some guest physical
> addresses to host physical addresses, but this isn't possible until
> the vcpu is running.

Can you explain why in more detail?  Also, please just squash this in
patch 7.

Paolo

> When entering VMX non-root operation from set_vmx_state, queue a request
> to perform the gpa->hpa lookups at the next vcpu_run.
> 
> Signed-off-by: Jim Mattson <jmattson@google.com>
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jim Mattson Dec. 9, 2016, 4:26 p.m. UTC | #2
This may be an artifact of our userspace agent that isn't shared by
qemu. I can drop this from the upstream patch set, if you like.

On Fri, Dec 9, 2016 at 7:35 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
>
> On 30/11/2016 21:03, Jim Mattson wrote:
>> Prepare_vmcs02 needs to be able to translate some guest physical
>> addresses to host physical addresses, but this isn't possible until
>> the vcpu is running.
>
> Can you explain why in more detail?  Also, please just squash this in
> patch 7.
>
> Paolo
>
>> When entering VMX non-root operation from set_vmx_state, queue a request
>> to perform the gpa->hpa lookups at the next vcpu_run.
>>
>> Signed-off-by: Jim Mattson <jmattson@google.com>
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Paolo Bonzini Dec. 10, 2016, 7:48 a.m. UTC | #3
> This may be an artifact of our userspace agent that isn't shared by
> qemu. I can drop this from the upstream patch set, if you like.

No, I think it makes sense, it just needs a better commit message.

Paolo

> On Fri, Dec 9, 2016 at 7:35 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> >
> >
> > On 30/11/2016 21:03, Jim Mattson wrote:
> >> Prepare_vmcs02 needs to be able to translate some guest physical
> >> addresses to host physical addresses, but this isn't possible until
> >> the vcpu is running.
> >
> > Can you explain why in more detail?  Also, please just squash this in
> > patch 7.
> >
> > Paolo
> >
> >> When entering VMX non-root operation from set_vmx_state, queue a request
> >> to perform the gpa->hpa lookups at the next vcpu_run.
> >>
> >> Signed-off-by: Jim Mattson <jmattson@google.com>
> 
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d6be6f1..0f54387 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -73,6 +73,7 @@ 
 #define KVM_REQ_HV_RESET          28
 #define KVM_REQ_HV_EXIT           29
 #define KVM_REQ_HV_STIMER         30
+#define KVM_REQ_GET_VMCS12_PAGES  31
 
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1025,6 +1026,7 @@  struct kvm_x86_ops {
 			     struct kvm_vmx_state __user *user_vmx_state);
 	int (*set_vmx_state)(struct kvm_vcpu *vcpu,
 			     struct kvm_vmx_state __user *user_vmx_state);
+	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d75c183..5c459ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9417,10 +9417,10 @@  static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 					       struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
-					struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	u64 hpa;
 
 	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
@@ -10265,8 +10265,6 @@  static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 
 	prepare_vmcs02(vcpu, vmcs12);
 
-	nested_get_vmcs12_pages(vcpu, vmcs12);
-
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
 					    vmcs12->vm_entry_msr_load_count);
@@ -10351,6 +10349,8 @@  static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (ret)
 		return ret;
 
+	nested_get_vmcs12_pages(vcpu);
+
 	if (unlikely(vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)) {
 		vmx->nested.nested_run_pending = 0;
 		return kvm_vcpu_halt(vcpu);
@@ -11352,6 +11352,7 @@  static int set_vmcs_cache(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	u32 exit_qual;
+	int ret;
 
 	if (vmx_state.data_size < VMCS12_SIZE ||
 	    vmx_state.current_vmptr == vmx_state.vmxon_ptr ||
@@ -11370,7 +11371,16 @@  static int set_vmcs_cache(struct kvm_vcpu *vcpu,
 	if (check_vmentry_prereqs(vcpu, vmcs12) ||
 	    check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
 		return -EINVAL;
-	return enter_vmx_non_root_mode(vcpu);
+	ret = enter_vmx_non_root_mode(vcpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * This request will result in a call to
+	 * nested_get_vmcs12_pages before the next VM-entry.
+	 */
+	kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+	return 0;
 }
 
 static int set_vmx_state(struct kvm_vcpu *vcpu,
@@ -11541,6 +11551,7 @@  static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.get_vmx_state = get_vmx_state,
 	.set_vmx_state = set_vmx_state,
+	.get_vmcs12_pages = nested_get_vmcs12_pages,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e249215..39c1517 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6551,6 +6551,8 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
+		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
+			kvm_x86_ops->get_vmcs12_pages(vcpu);
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 			kvm_mmu_unload(vcpu);
 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))