
[v4,06/16] KVM: Annotate -EFAULTs from kvm_vcpu_read_guest_page()

Message ID 20230602161921.208564-7-amoorthy@google.com (mailing list archive)
State New, archived
Series Improve scalability of KVM + userfaultfd live migration via annotated memory faults.

Commit Message

Anish Moorthy June 2, 2023, 4:19 p.m. UTC
Implement KVM_CAP_MEMORY_FAULT_INFO for uaccess failures within
kvm_vcpu_read_guest_page().

Signed-off-by: Anish Moorthy <amoorthy@google.com>
---
 virt/kvm/kvm_main.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
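
For context, a VMM consumes this annotation by checking the vcpu run struct
whenever KVM_RUN fails with -EFAULT. A minimal userspace sketch, assuming the
kvm_run layout proposed earlier in this series (KVM_EXIT_MEMORY_FAULT and the
memory_fault.{flags, gpa, len} fields come from the series, not current
mainline, so the final names may differ):

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Run the vcpu once; 'run' is the mmap()ed kvm_run struct for vcpu_fd.
 * On an annotated -EFAULT, report the faulting range and let the caller
 * retry; a real VMM would instead resolve the fault (e.g. UFFDIO_COPY
 * the missing page) before re-entering the guest.
 */
static int vcpu_run_once(int vcpu_fd, struct kvm_run *run)
{
	int ret = ioctl(vcpu_fd, KVM_RUN, 0);

	if (ret < 0 && errno == EFAULT &&
	    run->exit_reason == KVM_EXIT_MEMORY_FAULT) {
		fprintf(stderr, "memory fault: gpa=0x%llx len=0x%llx flags=0x%llx\n",
			(unsigned long long)run->memory_fault.gpa,
			(unsigned long long)run->memory_fault.len,
			(unsigned long long)run->memory_fault.flags);
		return 0;	/* resolvable: caller may re-enter the guest */
	}
	return ret;
}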

Comments

Sean Christopherson June 14, 2023, 7:22 p.m. UTC | #1
On Fri, Jun 02, 2023, Anish Moorthy wrote:
> Implement KVM_CAP_MEMORY_FAULT_INFO for uaccess failures within
> kvm_vcpu_read_guest_page().

Same comments as the "write" patch.  And while I often advocate for tiny patches,
I see no reason to split the read and write changes into separate patches; they're
thematically identical enough to count as a "single logical change".
Anish Moorthy July 7, 2023, 5:35 p.m. UTC | #2
Done, and same question/comment as the "write" patch (though I'm sure
we'll just keep all the discussion there henceforth).

Patch

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ea27a8178f1a..b9d2606f9251 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2986,9 +2986,12 @@ static int next_segment(unsigned long len, int offset)
 
 /*
  * Copy 'len' bytes from guest memory at '(gfn * PAGE_SIZE) + offset' to 'data'
+ * If 'vcpu' is non-null, then this function may fill its run struct with
+ * KVM_EXIT_MEMORY_FAULT information on uaccess failure.
  */
-static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
-				 void *data, int offset, int len)
+static int __kvm_read_guest_page(struct kvm_memory_slot *slot,
+				 struct kvm_vcpu *vcpu,
+				 gfn_t gfn, void *data, int offset, int len)
 {
 	int r;
 	unsigned long addr;
@@ -2997,8 +3000,12 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	r = __copy_from_user(data, (void __user *)addr + offset, len);
-	if (r)
+	if (r) {
+		if (vcpu)
+			kvm_populate_efault_info(vcpu, gfn * PAGE_SIZE + offset,
+						 len, 0);
 		return -EFAULT;
+	}
 	return 0;
 }
 
@@ -3007,7 +3014,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 {
 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 
-	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+	return __kvm_read_guest_page(slot, NULL, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
 
@@ -3016,7 +3023,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
 {
 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
-	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+	return __kvm_read_guest_page(slot, vcpu, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
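
kvm_populate_efault_info() is introduced earlier in the series; its job is to
record the faulting range in the vcpu run struct so the -EFAULT returned here
reaches userspace annotated as a KVM_EXIT_MEMORY_FAULT. A sketch of the
presumed shape (the helper in the series also guards against clobbering an
already-populated exit reason; field and parameter names are inferred from
this patch's call site, not verified against the final code):

/*
 * Sketch, not the series' exact implementation: stash the faulting
 * gpa/len/flags in vcpu->run so userspace sees KVM_EXIT_MEMORY_FAULT
 * alongside the -EFAULT from KVM_RUN.
 */
static inline void kvm_populate_efault_info(struct kvm_vcpu *vcpu,
					    u64 gpa, u64 len, u64 flags)
{
	vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
	vcpu->run->memory_fault.gpa = gpa;
	vcpu->run->memory_fault.len = len;
	vcpu->run->memory_fault.flags = flags;
}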