diff mbox series

[v3,19/22] KVM: arm64: Annotate (some) -EFAULTs from user_mem_abort()

Message ID 20230412213510.1220557-20-amoorthy@google.com (mailing list archive)
State New, archived
Headers show
Series Improve scalability of KVM + userfaultfd live migration via annotated memory faults. | expand

Commit Message

Anish Moorthy April 12, 2023, 9:35 p.m. UTC
Implement KVM_CAP_MEMORY_FAULT_INFO for at least some -EFAULTs returned
by user_mem_abort(). The other -EFAULTs returned by this function occur
before the guest physical address of the fault has been calculated:
leave those unannotated.

Signed-off-by: Anish Moorthy <amoorthy@google.com>
---
 arch/arm64/kvm/mmu.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7113587222ffe..d5ae636c26d62 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1307,8 +1307,11 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
 	}
-	if (is_error_noslot_pfn(pfn))
+	if (is_error_noslot_pfn(pfn)) {
+		kvm_populate_efault_info(vcpu, round_down(gfn * PAGE_SIZE, vma_pagesize),
+				vma_pagesize);
 		return -EFAULT;
+	}
 
 	if (kvm_is_device_pfn(pfn)) {
 		/*
@@ -1357,6 +1360,8 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (kvm_vma_mte_allowed(vma)) {
 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
 		} else {
+			kvm_populate_efault_info(vcpu,
+					round_down(gfn * PAGE_SIZE, vma_pagesize), vma_pagesize);
 			ret = -EFAULT;
 			goto out_unlock;
 		}