
[v2,3/3] KVM: arm64: Perform memory fault exits when stage-2 handler EFAULTs

Message ID 20240809205158.1340255-4-amoorthy@google.com (mailing list archive)
State New, archived
Series Set up KVM_EXIT_MEMORY_FAULTs when arm64/x86 stage-2 fault handlers fail

Commit Message

Anish Moorthy Aug. 9, 2024, 8:51 p.m. UTC
Right now userspace just gets a bare EFAULT when the stage-2 fault
handler fails to fault in the relevant page. Set up a
KVM_EXIT_MEMORY_FAULT whenever this happens, which at the very least
eases debugging and might also let userspace decide on/take some
specific action other than crashing the VM.

In some cases, user_mem_abort() EFAULTs before the size of the fault is
calculated: return 0 in these cases to indicate that the fault is of
unknown size.

Signed-off-by: Anish Moorthy <amoorthy@google.com>
---
 Documentation/virt/kvm/api.rst |  2 +-
 arch/arm64/kvm/arm.c           |  1 +
 arch/arm64/kvm/mmu.c           | 11 ++++++++++-
 3 files changed, 12 insertions(+), 2 deletions(-)
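
For a sense of how userspace consumes these exits, here is a minimal
sketch of a VMM exit handler (the overall structure and policy choices
are illustrative only, not taken from any real VMM):

#include <linux/kvm.h>
#include <stdio.h>

/* Minimal sketch: consume KVM_EXIT_MEMORY_FAULT as set up by this
 * patch.  A size of 0 means user_mem_abort() bailed out before the
 * fault size was known, so only the base gpa can be trusted. */
static int handle_run_exit(struct kvm_run *run)
{
	unsigned long long gpa, size;

	if (run->exit_reason != KVM_EXIT_MEMORY_FAULT)
		return -1;	/* not handled here */

	gpa = run->memory_fault.gpa;
	size = run->memory_fault.size;

	if (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE) {
		/* Convert private <=> shared here, then re-enter the guest. */
		return 0;
	}

	/* Non-conversion fault: at minimum, log it instead of dying on a
	 * bare EFAULT. */
	fprintf(stderr, "memory fault: gpa 0x%llx size 0x%llx (0 == unknown)\n",
		gpa, size);
	return -1;
}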

Comments

Aneesh Kumar K.V Aug. 12, 2024, 7:51 a.m. UTC | #1
Anish Moorthy <amoorthy@google.com> writes:

> Right now userspace just gets a bare EFAULT when the stage-2 fault
> handler fails to fault in the relevant page. Set up a
> KVM_EXIT_MEMORY_FAULT whenever this happens, which at the very least
> eases debugging and might also let userspace decide on/take some
> specific action other than crashing the VM.
>
> In some cases, user_mem_abort() EFAULTs before the size of the fault is
> calculated: return 0 in these cases to indicate that the fault is of
> unknown size.
>

VMMs currently convert private memory to shared (or vice versa) on a
vcpu exit due to a memory fault. This change will require the VMM to
track each page's private/shared state so that it can handle an exit
for a fault on shared memory that happened for reasons other than
conversion. Should we make this easier by adding flag bits to indicate
that the fault was due to an attribute/access-type mismatch?
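
Purely to illustrate the suggestion, such a flag might look like the
below; only the PRIVATE flag exists in the UAPI today, the mismatch
flag is hypothetical:

#define KVM_MEMORY_EXIT_FLAG_PRIVATE		(1ULL << 3) /* existing */
#define KVM_MEMORY_EXIT_FLAG_ATTR_MISMATCH	(1ULL << 4) /* hypothetical */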


> Signed-off-by: Anish Moorthy <amoorthy@google.com>
> ---
>  Documentation/virt/kvm/api.rst |  2 +-
>  arch/arm64/kvm/arm.c           |  1 +
>  arch/arm64/kvm/mmu.c           | 11 ++++++++++-
>  3 files changed, 12 insertions(+), 2 deletions(-)
>
> diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
> index c5ce7944005c..7b321fefcb3e 100644
> --- a/Documentation/virt/kvm/api.rst
> +++ b/Documentation/virt/kvm/api.rst
> @@ -8129,7 +8129,7 @@ unavailable to host or other VMs.
>  7.34 KVM_CAP_MEMORY_FAULT_INFO
>  ------------------------------
>  
> -:Architectures: x86
> +:Architectures: arm64, x86
>  :Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
>  
>  The presence of this capability indicates that KVM_RUN *may* fill
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index a7ca776b51ec..4121b5a43b9c 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -335,6 +335,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>  	case KVM_CAP_ARM_SYSTEM_SUSPEND:
>  	case KVM_CAP_IRQFD_RESAMPLE:
>  	case KVM_CAP_COUNTER_OFFSET:
> +	case KVM_CAP_MEMORY_FAULT_INFO:
>  		r = 1;
>  		break;
>  	case KVM_CAP_SET_GUEST_DEBUG2:
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 6981b1bc0946..c97199d1feac 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1448,6 +1448,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  
>  	if (fault_is_perm && !write_fault && !exec_fault) {
>  		kvm_err("Unexpected L2 read permission error\n");
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,
> +					      write_fault, exec_fault, false);
>  		return -EFAULT;
>  	}
>  
> @@ -1473,6 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	if (unlikely(!vma)) {
>  		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
>  		mmap_read_unlock(current->mm);
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,
> +					      write_fault, exec_fault, false);
>  		return -EFAULT;
>  	}
>  
> @@ -1568,8 +1572,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  		kvm_send_hwpoison_signal(hva, vma_shift);
>  		return 0;
>  	}
> -	if (is_error_noslot_pfn(pfn))
> +	if (is_error_noslot_pfn(pfn)) {
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, vma_pagesize,
> +					      write_fault, exec_fault, false);
>  		return -EFAULT;
> +	}
>  
>  	if (kvm_is_device_pfn(pfn)) {
>  		/*
> @@ -1643,6 +1650,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  		if (mte_allowed) {
>  			sanitise_mte_tags(kvm, pfn, vma_pagesize);
>  		} else {
> +			kvm_prepare_memory_fault_exit(vcpu, fault_ipa, vma_pagesize,
> +						      write_fault, exec_fault, false);
>  			ret = -EFAULT;
>  			goto out_unlock;
>  		}
> -- 
> 2.46.0.76.ge559c4bf1a-goog
Sean Christopherson Aug. 13, 2024, 2:26 p.m. UTC | #2
On Mon, Aug 12, 2024, Aneesh Kumar K.V wrote:
> Anish Moorthy <amoorthy@google.com> writes:
> 
> > Right now userspace just gets a bare EFAULT when the stage-2 fault
> > handler fails to fault in the relevant page. Set up a
> > KVM_EXIT_MEMORY_FAULT whenever this happens, which at the very least
> > eases debugging and might also let userspace decide on/take some
> > specific action other than crashing the VM.
> >
> > In some cases, user_mem_abort() EFAULTs before the size of the fault is
> > calculated: return 0 in these cases to indicate that the fault is of
> > unknown size.
> >
> 
> VMMs currently convert private memory to shared (or vice versa) on a
> vcpu exit due to a memory fault. This change will require the VMM to
> track each page's private/shared state so that it can handle an exit
> for a fault on shared memory that happened for reasons other than
> conversion.

I don't see how filling kvm_run.memory_fault in more locations changes anything.
The userspace exits are inherently racy, e.g. userspace may have already converted
the page to the appropriate state, thus making KVM's exit spurious.  So either
the VMM already tracks state, or the VMM blindly converts to shared/private.

> Should we make this easier by adding flag bits to indicate that the
> fault was due to an attribute/access-type mismatch?

Like above, describing _why_ an exit occurred is problematic when an exit races
with a "fix" from userspace.  It's also problematic when there are multiple
possible faults, e.g. if the guest attempts to write to private memory, but
userspace has the memory mapped as read-only, shared (contrived, but possible).
Describing only the fault that KVM sees means the vCPU will encounter multiple
faults, and userspace will end up getting multiple exits.

Instead, KVM should describe the access that led to the fault, as planned in the
original series[1][2].  Userspace can then get the page into the correct state
straightaway, or take punitive action if the guest is misbehaving.

	if (is_write)
		vcpu->run->memory_fault.flags |= KVM_MEMORY_FAULT_FLAG_WRITE;
	else if (is_exec)
		vcpu->run->memory_fault.flags |= KVM_MEMORY_FAULT_FLAG_EXEC;
	else
		vcpu->run->memory_fault.flags |= KVM_MEMORY_FAULT_FLAG_READ;

That said, I'm a little hesitant to capture RWX information without a use case,
mainly because it will require a new capability for userspace to be able to rely
on the information.  In hindsight, it probably would have been better to capture
RWX information in the initial implementation.  Doh.

[1] https://lore.kernel.org/all/ZIn6VQSebTRN1jtX@google.com
[2] https://lore.kernel.org/all/ZR4N8cwzTMDanPUY@google.com
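
For completeness, the consumer side of such RWX reporting might look
like the sketch below; the flag names follow the snippet above and are
proposed, not yet in the UAPI, and the flag values are invented here:

#include <linux/kvm.h>
#include <sys/mman.h>

/* Proposed flags from the snippet above -- values are invented. */
#define KVM_MEMORY_FAULT_FLAG_READ	(1ULL << 0)
#define KVM_MEMORY_FAULT_FLAG_WRITE	(1ULL << 1)
#define KVM_MEMORY_FAULT_FLAG_EXEC	(1ULL << 2)

/* Use the reported access type to fix up the backing mapping in one
 * pass, rather than faulting repeatedly. */
static void fixup_mapping(struct kvm_run *run, void *hva, size_t len)
{
	int prot = PROT_READ;

	if (run->memory_fault.flags & KVM_MEMORY_FAULT_FLAG_WRITE)
		prot |= PROT_WRITE;
	if (run->memory_fault.flags & KVM_MEMORY_FAULT_FLAG_EXEC)
		prot |= PROT_EXEC;

	/* Re-map the backing memory with the needed permissions, or kill
	 * the guest if the access should never succeed. */
	mprotect(hva, len, prot);
}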
Aneesh Kumar K.V Aug. 14, 2024, 8:02 a.m. UTC | #3
Sean Christopherson <seanjc@google.com> writes:

> On Mon, Aug 12, 2024, Aneesh Kumar K.V wrote:
>> Anish Moorthy <amoorthy@google.com> writes:
>>
>> > Right now userspace just gets a bare EFAULT when the stage-2 fault
>> > handler fails to fault in the relevant page. Set up a
>> > KVM_EXIT_MEMORY_FAULT whenever this happens, which at the very least
>> > eases debugging and might also let userspace decide on/take some
>> > specific action other than crashing the VM.
>> >
>> > In some cases, user_mem_abort() EFAULTs before the size of the fault is
>> > calculated: return 0 in these cases to indicate that the fault is of
>> > unknown size.
>> >
>>
>> VMMs currently convert private memory to shared (or vice versa) on a
>> vcpu exit due to a memory fault. This change will require the VMM to
>> track each page's private/shared state so that it can handle an exit
>> for a fault on shared memory that happened for reasons other than
>> conversion.
>
> I don't see how filling kvm_run.memory_fault in more locations changes anything.
> The userspace exits are inherently racy, e.g. userspace may have already converted
> the page to the appropriate state, thus making KVM's exit spurious.  So either
> the VMM already tracks state, or the VMM blindly converts to shared/private.
>

I might be missing some details here. The change adds exit_reason =
KVM_EXIT_MEMORY_FAULT to code paths which would earlier result in VMM
panics, correct?

For ex:

@@ -1473,6 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
+		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,
+					      write_fault, exec_fault, false);
		return -EFAULT;
	}


VMMs handle this with code like the following:

static bool handle_memoryfault(struct kvm_cpu *vcpu)
{
....
        return true;
}

bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
{
	switch (vcpu->kvm_run->exit_reason) {
        ...
	case KVM_EXIT_MEMORY_FAULT:
		return handle_memoryfault(vcpu);
	}

	return false;
}

and the caller did

		ret = kvm_cpu__handle_exit(cpu);
		if (!ret)
			goto panic_kvm;
		break;


Won't this change break those VMMs? i.e., we will no longer panic after
this change?

-aneesh
Sean Christopherson Aug. 14, 2024, 2:49 p.m. UTC | #4
On Wed, Aug 14, 2024, Aneesh Kumar K.V wrote:
> Sean Christopherson <seanjc@google.com> writes:
> 
> > On Mon, Aug 12, 2024, Aneesh Kumar K.V wrote:
> >> Anish Moorthy <amoorthy@google.com> writes:
> >>
> >> > Right now userspace just gets a bare EFAULT when the stage-2 fault
> >> > handler fails to fault in the relevant page. Set up a
> >> > KVM_EXIT_MEMORY_FAULT whenever this happens, which at the very least
> >> > eases debugging and might also let userspace decide on/take some
> >> > specific action other than crashing the VM.
> >> >
> >> > In some cases, user_mem_abort() EFAULTs before the size of the fault is
> >> > calculated: return 0 in these cases to indicate that the fault is of
> >> > unknown size.
> >> >
> >>
> >> VMMs currently convert private memory to shared (or vice versa) on a
> >> vcpu exit due to a memory fault. This change will require the VMM to
> >> track each page's private/shared state so that it can handle an exit
> >> for a fault on shared memory that happened for reasons other than
> >> conversion.
> >
> > I don't see how filling kvm_run.memory_fault in more locations changes anything.
> > The userspace exits are inherently racy, e.g. userspace may have already converted
> > the page to the appropriate state, thus making KVM's exit spurious.  So either
> > the VMM already tracks state, or the VMM blindly converts to shared/private.
> >
> 
> I might be missing some details here. The change adds exit_reason =
> KVM_EXIT_MEMORY_FAULT to code paths which would earlier result in VMM
> panics, correct?
> 
> For ex:
> 
> @@ -1473,6 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
> 	if (unlikely(!vma)) {
> 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
> 		mmap_read_unlock(current->mm);
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,
> +					      write_fault, exec_fault, false);
> 		return -EFAULT;
> 	}
> 
> 
> VMMs handle this with code like the following:
> 
> static bool handle_memoryfault(struct kvm_cpu *vcpu)
> {
> ....
>         return true;
> }
> 
> bool kvm_cpu__handle_exit(struct kvm_cpu *vcpu)
> {
> 	switch (vcpu->kvm_run->exit_reason) {
>         ...
> 	case KVM_EXIT_MEMORY_FAULT:
> 		return handle_memoryfault(vcpu);
> 	}
> 
> 	return false;
> }
> 
> and the caller did
> 
> 		ret = kvm_cpu__handle_exit(cpu);
> 		if (!ret)
> 			goto panic_kvm;
> 		break;
> 
> 
> Won't this change break those VMMs? i.e., we will no longer panic after
> this change?

If the VMM unconditionally resumes the guest on errno=EFAULT, that's a VMM bug.
handle_memoryfault() needs to have some amount of checking to verify that it can
actually resolve the fault that was reported, given the gfn and metadata.  In
practice, that means panicking on any gfn that's not associated with a memslot
that has KVM_MEM_GUEST_MEMFD, because prior to this series, it's impossible for
userspace to resolve any faults besides implicit shared<=>private conversions.
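
Concretely, the kind of checking described above might look like the
following sketch (find_memslot(), convert_mem() and struct vm_memslot
are assumed helpers, not actual kvmtool or QEMU code):

/* Only treat the exit as a resolvable conversion request when the gfn
 * is backed by a guest_memfd-capable memslot; otherwise report failure
 * so the caller panics as before. */
static bool handle_memoryfault(struct kvm_cpu *vcpu)
{
	struct kvm_run *run = vcpu->kvm_run;
	struct vm_memslot *slot = find_memslot(run->memory_fault.gpa);

	if (!slot || !(slot->flags & KVM_MEM_GUEST_MEMFD))
		return false;

	convert_mem(slot, run->memory_fault.gpa, run->memory_fault.size,
		    run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE);
	return true;
}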
Sean Christopherson Aug. 16, 2024, 9:22 p.m. UTC | #5
On Fri, Aug 09, 2024, Anish Moorthy wrote:
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 6981b1bc0946..c97199d1feac 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1448,6 +1448,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  
>  	if (fault_is_perm && !write_fault && !exec_fault) {
>  		kvm_err("Unexpected L2 read permission error\n");
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,

In this case, KVM has the fault granule, can't we just use that instead of
reporting '0'?

> +					      write_fault, exec_fault, false);
>  		return -EFAULT;
>  	}
>  
> @@ -1473,6 +1475,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  	if (unlikely(!vma)) {
>  		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
>  		mmap_read_unlock(current->mm);
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,

Why can't KVM use the minimum possible page (granule?) size?  It's _always_ legal
for KVM to map at a smaller granularity than the primary MMU, thus it's always
accurate to report KVM's minimum size.

In fact, I would argue that it's inaccurate to report anything larger, because
there's no way for KVM to know if the badness extends to an entire hugepage.
E.g. even in the MTE case below, reporting the vma _page size_ is weird.  IIUC,
the problem exists with the entire vma, not some random (huge)page in the vma.

> +					      write_fault, exec_fault, false);
>  		return -EFAULT;
>  	}
>  
> @@ -1568,8 +1572,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
>  		kvm_send_hwpoison_signal(hva, vma_shift);
>  		return 0;
>  	}
> -	if (is_error_noslot_pfn(pfn))
> +	if (is_error_noslot_pfn(pfn)) {
> +		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, vma_pagesize,
> +					      write_fault, exec_fault, false);
>  		return -EFAULT;

Shouldn't this be:

	if (KVM_BUG_ON(is_error_noslot_pfn(pfn), vcpu->kvm))
		return -EIO;

Emulated MMIO is supposed to be handled in kvm_handle_guest_abort():

	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		...

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
		ret = io_mem_abort(vcpu, ipa);
		goto out_unlock;
	}

And the memslot itself is stable, e.g. it can't disappear, and it can't have its
flags toggled.  KVM specifically does all modifications to memslots on unreachable
structures so that a memslot cannot change once it has been retrieved from the
memslots tree.

	/*
	 * Mark the current slot INVALID.  As with all memslot modifications,
	 * this must be done on an unreachable slot to avoid modifying the
	 * current slot in the active tree.
	 */
	kvm_copy_memslot(invalid_slot, old);
	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
	kvm_replace_memslot(kvm, old, invalid_slot);

And if KVM were indeed re-retrieving the memslot from kvm->memslots, then the
appropriate behavior would be

	if (is_error_noslot_pfn(pfn))
		return -EAGAIN;

so that KVM retries the fault.  It's perfectly legal to delete a memslot at any
time, with the rather large caveat that if bad things happen to the guest, it's
userspace's responsibility to deal with the fallout.

Patch

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index c5ce7944005c..7b321fefcb3e 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8129,7 +8129,7 @@  unavailable to host or other VMs.
 7.34 KVM_CAP_MEMORY_FAULT_INFO
 ------------------------------
 
-:Architectures: x86
+:Architectures: arm64, x86
 :Returns: Informational only, -EINVAL on direct KVM_ENABLE_CAP.
 
 The presence of this capability indicates that KVM_RUN *may* fill
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a7ca776b51ec..4121b5a43b9c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -335,6 +335,7 @@  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_SYSTEM_SUSPEND:
 	case KVM_CAP_IRQFD_RESAMPLE:
 	case KVM_CAP_COUNTER_OFFSET:
+	case KVM_CAP_MEMORY_FAULT_INFO:
 		r = 1;
 		break;
 	case KVM_CAP_SET_GUEST_DEBUG2:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6981b1bc0946..c97199d1feac 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1448,6 +1448,8 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	if (fault_is_perm && !write_fault && !exec_fault) {
 		kvm_err("Unexpected L2 read permission error\n");
+		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,
+					      write_fault, exec_fault, false);
 		return -EFAULT;
 	}
 
@@ -1473,6 +1475,8 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (unlikely(!vma)) {
 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
 		mmap_read_unlock(current->mm);
+		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, 0,
+					      write_fault, exec_fault, false);
 		return -EFAULT;
 	}
 
@@ -1568,8 +1572,11 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
 	}
-	if (is_error_noslot_pfn(pfn))
+	if (is_error_noslot_pfn(pfn)) {
+		kvm_prepare_memory_fault_exit(vcpu, fault_ipa, vma_pagesize,
+					      write_fault, exec_fault, false);
 		return -EFAULT;
+	}
 
 	if (kvm_is_device_pfn(pfn)) {
 		/*
@@ -1643,6 +1650,8 @@  static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (mte_allowed) {
 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
 		} else {
+			kvm_prepare_memory_fault_exit(vcpu, fault_ipa, vma_pagesize,
+						      write_fault, exec_fault, false);
 			ret = -EFAULT;
 			goto out_unlock;
 		}