diff mbox series

[v5,07/17] KVM: arm64: Annotate -EFAULT from user_mem_abort()

Message ID 20230908222905.1321305-8-amoorthy@google.com (mailing list archive)
State New, archived
Headers show
Series Improve KVM + userfaultfd live migration via annotated memory faults. | expand

Commit Message

Anish Moorthy Sept. 8, 2023, 10:28 p.m. UTC
Implement KVM_CAP_MEMORY_FAULT_INFO for guest access failure in
user_mem_abort().

Signed-off-by: Anish Moorthy <amoorthy@google.com>
---
 arch/arm64/kvm/mmu.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

Comments

Anish Moorthy Sept. 28, 2023, 9:42 p.m. UTC | #1
> +               if (write_fault)
> +                       memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC;
> +               else if (exec_fault)
> +                       memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC;

Ugh I could have sworn I already fixed this, thanks Oliver for bringing it up
Sean Christopherson Oct. 5, 2023, 1:26 a.m. UTC | #2
On Fri, Sep 08, 2023, Anish Moorthy wrote:
> Implement KVM_CAP_MEMORY_FAULT_INFO for guest access failure in
> user_mem_abort().

Same comments as the x86 patch, this is way too terse.
David Matlack Oct. 10, 2023, 11:01 p.m. UTC | #3
On Fri, Sep 8, 2023 at 3:30 PM Anish Moorthy <amoorthy@google.com> wrote:
>
> Implement KVM_CAP_MEMORY_FAULT_INFO for guest access failure in
> user_mem_abort().
>
> Signed-off-by: Anish Moorthy <amoorthy@google.com>
> ---
>  arch/arm64/kvm/mmu.c | 13 ++++++++++++-
>  1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 587a104f66c3..8ede6c5edc5f 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1408,6 +1408,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>         long vma_pagesize, fault_granule;
>         enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
>         struct kvm_pgtable *pgt;
> +       uint64_t memory_fault_flags;
>
>         fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
>         write_fault = kvm_is_write_fault(vcpu);
> @@ -1507,8 +1508,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>                 kvm_send_hwpoison_signal(hva, vma_shift);
>                 return 0;
>         }
> -       if (is_error_noslot_pfn(pfn))
> +       if (is_error_noslot_pfn(pfn)) {
> +               memory_fault_flags = 0;
> +               if (write_fault)
> +                       memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC;
> +               else if (exec_fault)
> +                       memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC;
> +               else
> +                       memory_fault_flags = KVM_MEMORY_FAULT_FLAG_READ;
> +               kvm_handle_guest_uaccess_fault(vcpu, round_down(gfn * PAGE_SIZE, vma_pagesize),

I think gfn * PAGE_SIZE is already rounded down to vma_pagesize. See
earlier in this function:

1484         vma_pagesize = 1UL << vma_shift;
1485         if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
1486                 fault_ipa &= ~(vma_pagesize - 1);
1487
1488         gfn = fault_ipa >> PAGE_SHIFT;


> +                                              vma_pagesize, memory_fault_flags);
>                 return -EFAULT;
> +       }
>
>         if (kvm_is_device_pfn(pfn)) {
>                 /*
> --
> 2.42.0.283.g2d96d420d3-goog
>
diff mbox series

Patch

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 587a104f66c3..8ede6c5edc5f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1408,6 +1408,7 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
+	uint64_t memory_fault_flags;
 
 	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
 	write_fault = kvm_is_write_fault(vcpu);
@@ -1507,8 +1508,18 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
 	}
-	if (is_error_noslot_pfn(pfn))
+	if (is_error_noslot_pfn(pfn)) {
+		memory_fault_flags = 0;
+		if (write_fault)
+			memory_fault_flags = KVM_MEMORY_FAULT_FLAG_WRITE;
+		else if (exec_fault)
+			memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC;
+		else
+			memory_fault_flags = KVM_MEMORY_FAULT_FLAG_READ;
+		kvm_handle_guest_uaccess_fault(vcpu, round_down(gfn * PAGE_SIZE, vma_pagesize),
+					       vma_pagesize, memory_fault_flags);
 		return -EFAULT;
+	}
 
 	if (kvm_is_device_pfn(pfn)) {
 		/*