@@ -327,6 +327,8 @@ struct kvm_mmu_page {
 	struct hlist_node hash_link;
 	bool unsync;
 	bool mmio_cached;
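+	/* Set when this page's sptes map final-level (leaf) entries. */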
+	bool last_level;
 
 	/*
 	 * The following two entries are used to key the shadow page in the
@@ -2754,6 +2754,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	 */
 	if (kvm_available_flush_tlb_with_range()) {
 		list_for_each_entry(sp, invalid_list, link)
-			hlist_add_head(&sp->flush_link, &flush_list);
+			if (sp->last_level)
+				hlist_add_head(&sp->flush_link, &flush_list);
 
 		kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
@@ -2956,6 +2957,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	if (level > PT_PAGE_TABLE_LEVEL)
 		spte |= PT_PAGE_SIZE_MASK;
+
 	if (tdp_enabled)
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
@@ -3010,6 +3012,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (speculative)
 		spte = mark_spte_for_access_track(spte);
 
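+	/* Sptes installed by set_spte() are leaf entries; flag the page. */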
+	sp->last_level = true;
+
set_pte:
 	if (mmu_spte_update(sptep, spte))
 		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
@@ -3200,6 +3205,11 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
 					      iterator.level - 1, 1, ACC_ALL);
 			link_shadow_page(vcpu, iterator.sptep, sp);
+
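+			/* This spte now links a child table, not leaf pages. */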
+			sp = page_header(__pa(iterator.sptep));
+			if (sp->last_level)
+				sp->last_level = false;
 		}
 	}
 	return emulate;
 }
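
The hunks above maintain a single invariant: set_spte() flags a shadow page while it holds last-level (leaf) sptes, __direct_map() clears the flag once one of the page's sptes is turned into a link to a child table, and kvm_mmu_commit_zap_page() then queues only the flagged pages for the ranged TLB flush. The standalone C sketch below models just that bookkeeping in userspace; struct shadow_page, mark_leaf(), mark_nonleaf(), and flush_ranges() are hypothetical stand-ins for kvm_mmu_page and the three patched functions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct shadow_page {
	unsigned long gfn;        /* base guest frame number the table maps */
	bool last_level;          /* holds leaf sptes -> range-flushable */
	struct shadow_page *next; /* stands in for the invalid_list links */
};

/* set_spte() analogue: installing a leaf spte marks the owning page. */
static void mark_leaf(struct shadow_page *sp)
{
	sp->last_level = true;
}

/* link_shadow_page() analogue: an spte in this page now points at a
 * child table, so the page no longer qualifies for range flushing. */
static void mark_nonleaf(struct shadow_page *sp)
{
	if (sp->last_level)
		sp->last_level = false;
}

/* kvm_mmu_commit_zap_page() analogue: walk the invalid list and only
 * "flush" pages whose sptes are last-level, as the patched loop does. */
static void flush_ranges(const struct shadow_page *invalid_list)
{
	for (const struct shadow_page *sp = invalid_list; sp; sp = sp->next)
		if (sp->last_level)
			printf("range flush: gfn 0x%lx\n", sp->gfn);
}

int main(void)
{
	struct shadow_page leaf = { .gfn = 0x1000 };
	struct shadow_page dir  = { .gfn = 0x0, .next = &leaf };

	mark_leaf(&leaf);   /* its entries map guest memory directly */
	mark_leaf(&dir);
	mark_nonleaf(&dir); /* one of dir's entries now links a child */

	flush_ranges(&dir); /* prints only the leaf page's range */
	return 0;
}

Compiled and run, the sketch prints only "range flush: gfn 0x1000": the directory page sits on the invalid list but stays off the flush list, which is exactly the filtering the new last_level flag buys in kvm_mmu_commit_zap_page().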