diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1434,6 +1434,39 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
return vma->vm_flags & VM_MTE_ALLOWED;
}
+static kvm_pfn_t faultin_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, bool write_fault, bool *writable,
+ struct page **page, bool is_private)
+{
+ kvm_pfn_t pfn;
+ int ret;
+
+ if (!is_private)
+ return __kvm_faultin_pfn(slot, gfn, write_fault ? FOLL_WRITE : 0, writable, page);
+
+ *writable = false;
+
+ if (WARN_ON_ONCE(write_fault && memslot_is_readonly(slot)))
+ return KVM_PFN_ERR_RO_FAULT;
+
+ ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, page, NULL);
+ if (!ret) {
+ *writable = write_fault;
+ return pfn;
+ }
+
+ if (ret == -EHWPOISON)
+ return KVM_PFN_ERR_HWPOISON;
+
+ return KVM_PFN_ERR_FAULT;
+}
+
+static bool is_private_mem(struct kvm *kvm, struct kvm_memory_slot *memslot, phys_addr_t ipa)
+{
+ return kvm_arch_has_private_mem(kvm) && kvm_slot_can_be_private(memslot) &&
+ (kvm_mem_is_private(kvm, ipa >> PAGE_SHIFT) || !memslot->userspace_addr);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_trans *nested,
struct kvm_memory_slot *memslot, unsigned long hva,
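
Note on the routing in the hunk above: a fault is resolved via guest_memfd
when the VM supports private memory, the slot can be bound to a guest_memfd,
and either the gfn is currently marked private or the slot has no userspace
mapping at all. A standalone userspace sketch of that predicate, kept apart
from the diff (all names below are illustrative stand-ins, not kernel
symbols):

#include <stdbool.h>
#include <stdio.h>

struct toy_slot {
	bool can_be_private;          /* slot was created with a guest_memfd */
	unsigned long userspace_addr; /* 0 => no shared (VMA) mapping */
};

/* Mirrors the shape of is_private_mem() in the hunk above. */
static bool toy_route_to_gmem(bool vm_has_private_mem,
			      const struct toy_slot *slot,
			      bool gfn_marked_private)
{
	return vm_has_private_mem && slot->can_be_private &&
	       (gfn_marked_private || !slot->userspace_addr);
}

int main(void)
{
	struct toy_slot gmem_only = { true, 0 };
	struct toy_slot dual = { true, 0x7f0000000000UL };

	printf("%d\n", toy_route_to_gmem(true, &gmem_only, false)); /* 1 */
	printf("%d\n", toy_route_to_gmem(true, &dual, false));      /* 0 */
	printf("%d\n", toy_route_to_gmem(true, &dual, true));       /* 1 */
	return 0;
}

A slot bound to a guest_memfd with no userspace mapping therefore always
faults in via gmem, while a "dual" slot follows the per-gfn attribute set
with KVM_SET_MEMORY_ATTRIBUTES.
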
@@ -1441,24 +1474,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
{
int ret = 0;
bool write_fault, writable;
- bool exec_fault, mte_allowed;
+ bool exec_fault, mte_allowed = false;
bool device = false, vfio_allow_any_uc = false;
unsigned long mmu_seq;
phys_addr_t ipa = fault_ipa;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma = NULL;
short vma_shift;
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- bool force_pte = logging_active;
- long vma_pagesize, fault_granule;
+ bool is_private = is_private_mem(kvm, memslot, fault_ipa);
+ bool force_pte = logging_active || is_private;
+ long vma_pagesize, fault_granule = PAGE_SIZE;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
struct page *page;
- if (fault_is_perm)
+ if (fault_is_perm && !is_private)
fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
write_fault = kvm_is_write_fault(vcpu);
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
@@ -1482,24 +1516,30 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}
+ mmap_read_lock(current->mm);
+
/*
* Let's check if we will get back a huge page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- mmap_read_lock(current->mm);
- vma = vma_lookup(current->mm, hva);
- if (unlikely(!vma)) {
- kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- mmap_read_unlock(current->mm);
- return -EFAULT;
- }
+ if (!is_private) {
+ vma = vma_lookup(current->mm, hva);
+ if (unlikely(!vma)) {
+ kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
+ mmap_read_unlock(current->mm);
+ return -EFAULT;
+ }
- /*
- * logging_active is guaranteed to never be true for VM_PFNMAP
- * memslots.
- */
- if (WARN_ON_ONCE(logging_active && (vma->vm_flags & VM_PFNMAP)))
- return -EFAULT;
+ /*
+ * logging_active is guaranteed to never be true for VM_PFNMAP
+ * memslots.
+ */
+ if (WARN_ON_ONCE(logging_active && (vma->vm_flags & VM_PFNMAP)))
+ return -EFAULT;
+
+ vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
+ mte_allowed = kvm_vma_mte_allowed(vma);
+ }
if (force_pte)
vma_shift = PAGE_SHIFT;
@@ -1570,17 +1610,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
gfn = ipa >> PAGE_SHIFT;
- mte_allowed = kvm_vma_mte_allowed(vma);
-
- vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
/* Don't use the VMA after the unlock -- it may have vanished */
vma = NULL;
/*
* Read mmu_invalidate_seq so that KVM can detect if the results of
- * vma_lookup() or __kvm_faultin_pfn() become stale prior to
- * acquiring kvm->mmu_lock.
+ * vma_lookup() or faultin_pfn() become stale prior to acquiring
+ * kvm->mmu_lock.
*
* Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
* with the smp_wmb() in kvm_mmu_invalidate_end().
@@ -1588,8 +1625,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
mmu_seq = vcpu->kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);
- pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
- &writable, &page);
+ pfn = faultin_pfn(kvm, memslot, gfn, write_fault, &writable, &page, is_private);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1853,6 +1853,11 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
return gfn_to_memslot(kvm, gfn)->id;
}
+static inline bool memslot_is_readonly(const struct kvm_memory_slot *slot)
+{
+ return slot->flags & KVM_MEM_READONLY;
+}
+
static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2622,11 +2622,6 @@ unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
return size;
}
-static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
-{
- return slot->flags & KVM_MEM_READONLY;
-}
-
static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages, bool write)
{
Add arm64 support for resolving guest page faults on guest_memfd() backed
memslots. This support is not contingent on pKVM or other confidential
computing support, and works in both VHE and nVHE modes.

Without confidential computing, this support is useful for testing and
debugging. In the future, it might also be useful should a user want to
use guest_memfd() for all code, whether it's for a protected guest or not.

For now, the fault granule is restricted to PAGE_SIZE.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/mmu.c     | 86 ++++++++++++++++++++++++++++------------
 include/linux/kvm_host.h |  5 +++
 virt/kvm/kvm_main.c      |  5 ---
 3 files changed, 66 insertions(+), 30 deletions(-)
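
For anyone wanting to exercise the new path from userspace: bind a memslot
to a guest_memfd and leave userspace_addr at zero, so every fault on the
slot resolves through faultin_pfn()'s gmem branch. A minimal sketch using
the upstream guest_memfd UAPI (assumes a kernel with KVM_CAP_GUEST_MEMFD
plus this series; error handling elided):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Create a guest_memfd of 'size' bytes and bind it to memslot 0 at 'gpa'. */
static int bind_gmem_slot(int vm_fd, __u64 gpa, __u64 size)
{
	struct kvm_create_guest_memfd gmem = {
		.size = size,
	};
	int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

	struct kvm_userspace_memory_region2 region = {
		.slot = 0,
		.flags = KVM_MEM_GUEST_MEMFD,
		.guest_phys_addr = gpa,
		.memory_size = size,
		/* userspace_addr left at 0: no shared mapping for this slot */
		.guest_memfd = gmem_fd,
		.guest_memfd_offset = 0,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}

With such a slot, is_private_mem() is true for every fault (no
userspace_addr), matching the gmem-only case in the sketch earlier.
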