Signed-off-by: Ahmed Abd El Mawgood <ahmedsoliman@mena.vt.edu>
---
 arch/x86/kvm/mmu.c | 7 +++++++
 arch/x86/kvm/mmu.h | 1 +
 2 files changed, 8 insertions(+)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1024,6 +1024,13 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
 	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
 }
+gfn_t spte_to_gfn(u64 *spte)
+{
+	struct kvm_mmu_page *sp;
+
+	sp = page_header(__pa(spte));
+	return kvm_mmu_page_get_gfn(sp, spte - sp->spt);
+}
 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
 {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -211,4 +211,5 @@ void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn);
 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+gfn_t spte_to_gfn(u64 *sptep);
 #endif
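
For intuition, the new helper composes two existing pieces of KVM's shadow-MMU bookkeeping: page_header(__pa(spte)) maps a pointer into a shadow page table page back to the struct kvm_mmu_page that owns it, and `spte - sp->spt` is ordinary pointer subtraction yielding the entry's index within that page. kvm_mmu_page_get_gfn() then scales the index by the number of guest frames a single entry spans at the page's level, as the context line above shows for the direct-mapped case. The stand-alone sketch below replays that arithmetic in plain user-space C; struct mmu_page, page_get_gfn() and spte_to_gfn_demo() are simplified stand-ins invented for illustration, not KVM's real definitions.

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative stand-ins (assumptions, not the kernel's definitions):
	 * an x86-64 page table page holds 512 entries, so each level up
	 * multiplies the span of one entry by 2^9 guest frames. */
	#define PT64_LEVEL_BITS   9
	#define PT64_ENT_PER_PAGE 512

	typedef uint64_t gfn_t;

	struct mmu_page {
		gfn_t    gfn;                    /* first gfn mapped by this table page */
		int      level;                  /* 1 = 4KiB leaf, 2 = 2MiB, ... */
		uint64_t spt[PT64_ENT_PER_PAGE]; /* the shadow page table itself */
	};

	/* Mirrors the direct-map branch of kvm_mmu_page_get_gfn(): entry
	 * `index` at `level` covers 1 << ((level - 1) * 9) guest frames. */
	static gfn_t page_get_gfn(struct mmu_page *sp, int index)
	{
		return sp->gfn + ((gfn_t)index << ((sp->level - 1) * PT64_LEVEL_BITS));
	}

	/* Mirrors the new spte_to_gfn(): pointer subtraction recovers the
	 * index. In the kernel, page_header(__pa(spte)) locates `sp`; here
	 * we pass it explicitly to keep the demo self-contained. */
	static gfn_t spte_to_gfn_demo(struct mmu_page *sp, uint64_t *spte)
	{
		return page_get_gfn(sp, (int)(spte - sp->spt));
	}

	int main(void)
	{
		struct mmu_page sp = { .gfn = 0x1000, .level = 2 };

		/* Entry 3 of a level-2 table: 0x1000 + (3 << 9) = 0x1600. */
		printf("gfn = 0x%llx\n",
		       (unsigned long long)spte_to_gfn_demo(&sp, &sp.spt[3]));
		return 0;
	}

One detail the sketch omits: kvm_mmu_page_get_gfn() only computes the gfn arithmetically for direct-mapped shadow pages; for indirect pages KVM records each entry's gfn in a separate sp->gfns[] array, and the real helper handles both cases transparently by delegating to that existing accessor.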