@@ -2986,9 +2986,12 @@ static int next_segment(unsigned long len, int offset)
/*
* Copy 'len' bytes from guest memory at '(gfn * PAGE_SIZE) + offset' to 'data'
+ * If 'vcpu' is non-null, then may fill its run struct for a
+ * KVM_EXIT_MEMORY_FAULT on uaccess failure.
*/
-static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
- void *data, int offset, int len)
+static int __kvm_read_guest_page(struct kvm_memory_slot *slot,
+ struct kvm_vcpu *vcpu,
+ gfn_t gfn, void *data, int offset, int len)
{
int r;
unsigned long addr;
@@ -2997,8 +3000,12 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
if (kvm_is_error_hva(addr))
return -EFAULT;
r = __copy_from_user(data, (void __user *)addr + offset, len);
- if (r)
+ if (r) {
+ if (vcpu)
+ kvm_populate_efault_info(vcpu, gfn * PAGE_SIZE + offset,
+ len, 0);
return -EFAULT;
+ }
return 0;
}
@@ -3007,7 +3014,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
- return __kvm_read_guest_page(slot, gfn, data, offset, len);
+ return __kvm_read_guest_page(slot, NULL, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
@@ -3016,7 +3023,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return __kvm_read_guest_page(slot, gfn, data, offset, len);
+ return __kvm_read_guest_page(slot, vcpu, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
Implement KVM_CAP_MEMORY_FAULT_INFO for uaccess failures within
kvm_vcpu_read_guest_page().

Signed-off-by: Anish Moorthy <amoorthy@google.com>
---
 virt/kvm/kvm_main.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)