@@ -4341,8 +4341,13 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	}
 
-	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
-			     &fault->refcounted_page, &max_order);
+	if (kvm_slot_is_vfio_dmabuf(fault->slot))
+		r = kvm_vfio_dmabuf_get_pfn(vcpu->kvm, fault->slot, fault->gfn,
+					    &fault->pfn, &max_order);
+	else
+		r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn,
+				     &fault->pfn, &fault->refcounted_page,
+				     &max_order);
 	if (r) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
 		return r;
@@ -4363,6 +4368,22 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
 	if (fault->is_private)
 		return kvm_mmu_faultin_pfn_private(vcpu, fault);
 
+	/* A vfio_dmabuf slot is also usable for shared mappings */
+	if (kvm_slot_is_vfio_dmabuf(fault->slot)) {
+		int max_order, r;
+
+		r = kvm_vfio_dmabuf_get_pfn(vcpu->kvm, fault->slot, fault->gfn,
+					    &fault->pfn, &max_order);
+		if (r)
+			return r;
+
+		fault->max_level = min(kvm_max_level_for_order(max_order),
+				       fault->max_level);
+		fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
+
+		return RET_PF_CONTINUE;
+	}
+
 	foll |= FOLL_NOWAIT;
 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
 				       &fault->map_writable, &fault->refcounted_page);
@@ -614,7 +614,12 @@ struct kvm_memory_slot {
 static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
 {
-	return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+	return slot && (slot->flags & (KVM_MEM_GUEST_MEMFD | KVM_MEM_VFIO_DMABUF));
+}
+
+static inline bool kvm_slot_is_vfio_dmabuf(const struct kvm_memory_slot *slot)
+{
+	return slot && (slot->flags & KVM_MEM_VFIO_DMABUF);
 }
 
 static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
 {
Add support for resolving page faults on vfio_dmabuf backed MMIO. This is
needed to support private MMIO for private assigned devices (known as TDIs
in the TDISP spec).

Private MMIO is registered with KVM as a vfio_dmabuf memory slot, another
type of can-be-private memory slot alongside the gmem slot. As with a gmem
slot, KVM needs to map each of its GFNs as shared or private based on the
current state of the GFN's memory attribute. When a page fault on private
MMIO requires a private <-> shared conversion, KVM still exits to userspace
with exit reason KVM_EXIT_MEMORY_FAULT and KVM_MEMORY_EXIT_FLAG_PRIVATE set
accordingly.

Unlike a gmem slot, a vfio_dmabuf slot is backed by a single MMIO resource,
so switching a GFN's attribute does not change how its PFN is obtained: it
is always resolved the vfio_dmabuf-specific way, via
kvm_vfio_dmabuf_get_pfn().

Signed-off-by: Xu Yilun <yilun.xu@linux.intel.com>
---
 arch/x86/kvm/mmu/mmu.c   | 25 +++++++++++++++++++++++--
 include/linux/kvm_host.h |  7 ++++++-
 2 files changed, 29 insertions(+), 3 deletions(-)
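For illustration, below is a minimal userspace-side sketch of how a VMM
might react to the KVM_EXIT_MEMORY_FAULT described above, by flipping the
reported GFN range's memory attribute with KVM_SET_MEMORY_ATTRIBUTES before
re-entering the vCPU. It relies only on the existing memory-attributes UAPI
from <linux/kvm.h>, is not part of this patch, and omits error handling and
the surrounding vCPU run loop.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Illustrative only: convert the faulting GFN range to the attribute the
 * guest access implies, then let the vCPU retry the access.
 */
static int handle_memory_fault_exit(int vm_fd, struct kvm_run *run)
{
	struct kvm_memory_attributes attrs = {
		.address    = run->memory_fault.gpa,
		.size       = run->memory_fault.size,
		/* Fault flagged private -> convert to private, else to shared */
		.attributes = (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE) ?
			      KVM_MEMORY_ATTRIBUTE_PRIVATE : 0,
	};

	return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
}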