Message ID | 20230908222905.1321305-8-amoorthy@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Improve KVM + userfaultfd live migration via annotated memory faults. | expand |
> + if (write_fault) > + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC; > + else if (exec_fault) > + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC; Ugh, I could have sworn I already fixed this — thanks, Oliver, for bringing it up.
On Fri, Sep 08, 2023, Anish Moorthy wrote: > Implement KVM_CAP_MEMORY_FAULT_INFO for guest access failure in > user_mem_abort(). Same comments as the x86 patch: this is way too terse.
On Fri, Sep 8, 2023 at 3:30 PM Anish Moorthy <amoorthy@google.com> wrote: > > Implement KVM_CAP_MEMORY_FAULT_INFO for guest access failure in > user_mem_abort(). > > Signed-off-by: Anish Moorthy <amoorthy@google.com> > --- > arch/arm64/kvm/mmu.c | 13 ++++++++++++- > 1 file changed, 12 insertions(+), 1 deletion(-) > > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c > index 587a104f66c3..8ede6c5edc5f 100644 > --- a/arch/arm64/kvm/mmu.c > +++ b/arch/arm64/kvm/mmu.c > @@ -1408,6 +1408,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, > long vma_pagesize, fault_granule; > enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; > struct kvm_pgtable *pgt; > + uint64_t memory_fault_flags; > > fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); > write_fault = kvm_is_write_fault(vcpu); > @@ -1507,8 +1508,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, > kvm_send_hwpoison_signal(hva, vma_shift); > return 0; > } > - if (is_error_noslot_pfn(pfn)) > + if (is_error_noslot_pfn(pfn)) { > + memory_fault_flags = 0; > + if (write_fault) > + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC; > + else if (exec_fault) > + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC; > + else > + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_READ; > + kvm_handle_guest_uaccess_fault(vcpu, round_down(gfn * PAGE_SIZE, vma_pagesize), I think gfn * PAGE_SIZE is already rounded down to vma_pagesize. See earlier in this function: 1484 vma_pagesize = 1UL << vma_shift; 1485 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) 1486 fault_ipa &= ~(vma_pagesize - 1); 1487 1488 gfn = fault_ipa >> PAGE_SHIFT; > + vma_pagesize, memory_fault_flags); > return -EFAULT; > + } > > if (kvm_is_device_pfn(pfn)) { > /* > -- > 2.42.0.283.g2d96d420d3-goog >
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 587a104f66c3..8ede6c5edc5f 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1408,6 +1408,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, long vma_pagesize, fault_granule; enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; struct kvm_pgtable *pgt; + uint64_t memory_fault_flags; fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); write_fault = kvm_is_write_fault(vcpu); @@ -1507,8 +1508,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_send_hwpoison_signal(hva, vma_shift); return 0; } - if (is_error_noslot_pfn(pfn)) + if (is_error_noslot_pfn(pfn)) { + memory_fault_flags = 0; + if (write_fault) + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC; + else if (exec_fault) + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_EXEC; + else + memory_fault_flags = KVM_MEMORY_FAULT_FLAG_READ; + kvm_handle_guest_uaccess_fault(vcpu, round_down(gfn * PAGE_SIZE, vma_pagesize), + vma_pagesize, memory_fault_flags); return -EFAULT; + } if (kvm_is_device_pfn(pfn)) { /*
Implement KVM_CAP_MEMORY_FAULT_INFO for guest access failure in user_mem_abort(). Signed-off-by: Anish Moorthy <amoorthy@google.com> --- arch/arm64/kvm/mmu.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-)