--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -102,6 +102,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
 void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
 			 int bytes);
+bool kvm_is_mmio_pfn(kvm_pfn_t pfn);
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
--- a/arch/x86/kvm/spte.c
+++ b/arch/x86/kvm/spte.c
@@ -105,7 +105,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 	return spte;
 }
 
-static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
+bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
 		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
@@ -125,6 +125,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 				     pfn_to_hpa(pfn + 1) - 1,
 				     E820_TYPE_RAM);
 }
+EXPORT_SYMBOL_GPL(kvm_is_mmio_pfn);
 
 /*
  * Returns true if the SPTE has bits that may be set without holding mmu_lock.
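
--

Note (not part of the patch): a minimal sketch of the kind of caller this
export enables, assuming the usual split between kvm.ko and the vendor
modules (kvm-intel.ko / kvm-amd.ko), which link separately and therefore
need the EXPORT_SYMBOL_GPL above to resolve the symbol. The helper name
and the memtype policy below are hypothetical, not taken from this patch:

	/*
	 * Hypothetical helper in a vendor module such as kvm-intel.ko:
	 * picks a host memory type based on whether the backing PFN is
	 * MMIO. The declaration of kvm_is_mmio_pfn() comes from the
	 * mmu.h hunk above.
	 */
	#include <linux/kvm_host.h>
	#include <asm/mtrr.h>

	#include "mmu.h"

	static u8 example_pfn_memtype(kvm_pfn_t pfn)
	{
		/* Map MMIO pages uncacheable, normal RAM write-back. */
		return kvm_is_mmio_pfn(pfn) ? MTRR_TYPE_UNCACHABLE
					    : MTRR_TYPE_WRBACK;
	}

Since the symbol is GPL-exported, any module calling it must also carry a
GPL-compatible MODULE_LICENSE(), as the in-tree vendor modules already do.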