@@ -3012,6 +3012,13 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
unsigned int access)
{
+ /* NOTE: not all error pfn is fatal; handle intr before the other ones */
+ if (unlikely(is_intr_pfn(fault->pfn))) {
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ ++vcpu->stat.signal_exits;
+ return -EINTR;
+ }
+
/* The pfn is invalid, report the error! */
if (unlikely(is_error_pfn(fault->pfn)))
return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
@@ -4017,6 +4024,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
}
}
+ /* Allow to respond to generic signals in slow page faults */
+ flags |= KVM_GTP_INTERRUPTIBLE;
fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, flags, NULL,
&fault->map_writable, &fault->hva);
return RET_PF_CONTINUE;
@@ -1163,6 +1163,7 @@ typedef unsigned int __bitwise kvm_gtp_flag_t;
#define KVM_GTP_WRITE ((__force kvm_gtp_flag_t) BIT(0))
#define KVM_GTP_ATOMIC ((__force kvm_gtp_flag_t) BIT(1))
+#define KVM_GTP_INTERRUPTIBLE ((__force kvm_gtp_flag_t) BIT(2))
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
kvm_gtp_flag_t gtp_flags, bool *async,
@@ -2462,6 +2462,8 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async,
flags |= FOLL_WRITE;
if (async)
flags |= FOLL_NOWAIT;
+ if (gtp_flags & KVM_GTP_INTERRUPTIBLE)
+ flags |= FOLL_INTERRUPTIBLE;
npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
@@ -2599,6 +2601,8 @@ kvm_pfn_t hva_to_pfn(unsigned long addr, kvm_gtp_flag_t gtp_flags, bool *async,
npages = hva_to_pfn_slow(addr, async, gtp_flags, writable, &pfn);
if (npages == 1)
return pfn;
+ if (npages == -EINTR)
+ return KVM_PFN_ERR_INTR;
mmap_read_lock(current->mm);
if (npages == -EHWPOISON ||
All the facilities should be ready for this; what we need to do is add a new KVM_GTP_INTERRUPTIBLE flag showing that we're willing to be interrupted by common signals during the __gfn_to_pfn_memslot() request, and wire it up with the FOLL_INTERRUPTIBLE flag that we've just introduced. Note that only the x86 slow page fault routine will set this new bit. The new bit is not used on non-x86 arches or on other gup paths, even for x86; however, it could be used elsewhere too — it is just not yet covered. When we see that the PFN fetching was interrupted, do an early exit to userspace with a KVM_EXIT_INTR exit reason. Signed-off-by: Peter Xu <peterx@redhat.com> --- arch/x86/kvm/mmu/mmu.c | 9 +++++++++ include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 4 ++++ 3 files changed, 14 insertions(+)