
[RFC,2/3] KVM: x86: use memory_prepare in fetch helper function

Message ID 1399336859-7227-3-git-send-email-bsd@redhat.com (mailing list archive)
State New, archived

Commit Message

Bandan Das May 6, 2014, 12:40 a.m. UTC
Insn fetch fastpath function. It's not that
arch.walk_mmu->gva_to_gpa can't be used, but let's
piggyback on top of the interface meant for our purpose.

Signed-off-by: Bandan Das <bsd@redhat.com>
---
 arch/x86/kvm/x86.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

Comments

Paolo Bonzini May 6, 2014, 8:27 a.m. UTC | #1
Il 06/05/2014 02:40, Bandan Das ha scritto:
> Insn fetch fastpath function. It's not that
> arch.walk_mmu->gva_to_gpa can't be used, but let's
> piggyback on top of the interface meant for our purpose.
>
> Signed-off-by: Bandan Das <bsd@redhat.com>
> ---
>  arch/x86/kvm/x86.c | 25 +++++++++++++++++--------
>  1 file changed, 17 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 17e3d661..cf69e3b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4065,29 +4065,38 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
>  				      struct x86_exception *exception)
>  {
>  	void *data = val;
> -	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
>  	int r = X86EMUL_CONTINUE;
>
>  	while (bytes) {
> -		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
> -							    exception);
>  		unsigned offset = addr & (PAGE_SIZE-1);
>  		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
>  		int ret;
> +		unsigned long uaddr;
>
> -		if (gpa == UNMAPPED_GVA)
> -			return X86EMUL_PROPAGATE_FAULT;
> -		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
> +		ret = ctxt->ops->memory_prepare(ctxt, addr, toread,
> +						exception, false,
> +						NULL, &uaddr);

You can call the function from x86.c directly, no need to go through 
ctxt->ops.

> +		if (ret != X86EMUL_CONTINUE)
> +			return ret;
> +
> +		if (unlikely(kvm_is_error_hva(uaddr))) {
> +			r = X86EMUL_PROPAGATE_FAULT;
> +			return r;
> +		}
> +
> +		ret = __copy_from_user(data, (void __user *)uaddr, toread);
>  		if (ret < 0) {
>  			r = X86EMUL_IO_NEEDED;
> -			goto out;
> +			return r;
>  		}
>
> +		ctxt->ops->memory_finish(ctxt, NULL, uaddr);

No need to call memory_finish, since you know the implementation 
(perhaps add a comment).

Paolo

>  		bytes -= toread;
>  		data += toread;
>  		addr += toread;
>  	}
> -out:
> +
>  	return r;
>  }
>
>
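[Editorial note: below is a minimal sketch of the loop body with both review
points above folded in. emulator_memory_prepare() is a hypothetical name for
whatever x86.c function backs ctxt->ops->memory_prepare in patch 1/3 of this
series (not shown here); it is called directly instead of through ctxt->ops,
and memory_finish() is dropped on the assumption that the read-side
implementation has nothing to release.]

	while (bytes) {
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long uaddr;
		int ret;

		/* Hypothetical x86.c-side helper, same signature as the op. */
		ret = emulator_memory_prepare(ctxt, addr, toread, exception,
					      false, NULL, &uaddr);
		if (ret != X86EMUL_CONTINUE)
			return ret;

		if (unlikely(kvm_is_error_hva(uaddr)))
			return X86EMUL_PROPAGATE_FAULT;

		if (__copy_from_user(data, (void __user *)uaddr, toread))
			return X86EMUL_IO_NEEDED;

		/* memory_finish() intentionally not called, see above. */
		bytes -= toread;
		data += toread;
		addr += toread;
	}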

Paolo Bonzini May 6, 2014, 9:46 a.m. UTC | #2
Il 06/05/2014 02:40, Bandan Das ha scritto:
> +		ret = ctxt->ops->memory_prepare(ctxt, addr, toread,
> +						exception, false,
> +						NULL, &uaddr);
> +		if (ret != X86EMUL_CONTINUE)
> +			return ret;
> +
> +		if (unlikely(kvm_is_error_hva(uaddr))) {
> +			r = X86EMUL_PROPAGATE_FAULT;
> +			return r;
> +		}

What you are doing here is basically optimizing 
kvm_read_guest_virt_helper because you know that all reads will be 
single-page and you do not need the "next_segment" in kvm_read_guest.
Good catch, but you can use kvm_read_guest_page instead of going through 
ctxt->ops->memory_prepare. :)

Paolo

> +		ret = __copy_from_user(data, (void __user *)uaddr, toread);
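[Editorial note: for comparison, Paolo's suggestion above amounts to keeping
the gva_to_gpa walk (and the vcpu = emul_to_vcpu(ctxt) line the patch removes)
and replacing kvm_read_guest() with the single-page kvm_read_guest_page(). A
rough sketch of what the loop body could look like, for illustration only:]

	while (bytes) {
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
							    exception);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;

		/*
		 * Reads here never cross a page boundary, so the per-page
		 * helper suffices; kvm_read_guest()'s next_segment loop is
		 * not needed.
		 */
		ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(gpa), data,
					  offset, toread);
		if (ret < 0)
			return X86EMUL_IO_NEEDED;

		bytes -= toread;
		data += toread;
		addr += toread;
	}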


Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 17e3d661..cf69e3b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4065,29 +4065,38 @@  static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 				      struct x86_exception *exception)
 {
 	void *data = val;
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
+		unsigned long uaddr;
 
-		if (gpa == UNMAPPED_GVA)
-			return X86EMUL_PROPAGATE_FAULT;
-		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
+		ret = ctxt->ops->memory_prepare(ctxt, addr, toread,
+						exception, false,
+						NULL, &uaddr);
+		if (ret != X86EMUL_CONTINUE)
+			return ret;
+
+		if (unlikely(kvm_is_error_hva(uaddr))) {
+			r = X86EMUL_PROPAGATE_FAULT;
+			return r;
+		}
+
+		ret = __copy_from_user(data, (void __user *)uaddr, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
-			goto out;
+			return r;
 		}
 
+		ctxt->ops->memory_finish(ctxt, NULL, uaddr);
+
 		bytes -= toread;
 		data += toread;
 		addr += toread;
 	}
-out:
+
 	return r;
 }