@@ -1039,7 +1039,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		gpa_t pte_gpa;
 		gfn_t gfn;
 
-		if (!sp->spt[i])
+		if (!__is_shadow_present_pte(sp->spt[i]))
 			continue;
 
 		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
@@ -174,9 +174,22 @@ static inline bool is_access_track_spte(u64 spte)
 	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
 }
 
-static inline int is_shadow_present_pte(u64 pte)
+static inline bool __is_shadow_present_pte(u64 pte)
 {
-	return (pte != 0) && !is_mmio_spte(pte);
+	/*
+	 * Ignore bits 63 and 62 so that they can be set in SPTEs that are well
+	 * and truly not present. We can't use the sane/obvious approach of
+	 * querying bits 2:0 (RWX or P) because EPT without A/D bits will clear
+	 * RWX of a "present" SPTE to do access tracking. Tracking updates can
+	 * be done out of mmu_lock, so even the flushing logic needs to treat
+	 * such SPTEs as present.
+	 */
+	return !!(pte << 2);
+}
+
+static inline bool is_shadow_present_pte(u64 pte)
+{
+	return __is_shadow_present_pte(pte) && !is_mmio_spte(pte);
 }
 
 static inline int is_large_pte(u64 pte)
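
For reference, a minimal standalone sketch (not part of the patch; the SPTE values below are made up for illustration) of the shift trick in the comment above: an SPTE is treated as shadow-present iff any bit other than 63/62 is set. An access-tracked SPTE with RWX (bits 2:0) cleared therefore stays present, while a truly-not-present SPTE carrying only bits 63/62 is rejected, even though the old "pte != 0" check that sync_page() effectively relied on would have accepted it.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Body copied from the patch: shifting out bits 63 and 62 leaves a
 * nonzero value iff any of bits 61:0 is set. */
static bool __is_shadow_present_pte(uint64_t pte)
{
	return !!(pte << 2);
}

int main(void)
{
	/* Zeroed SPTE: not present, same result as the old "pte != 0". */
	assert(!__is_shadow_present_pte(0));

	/* Access-tracked SPTE: RWX (bits 2:0) cleared for access
	 * tracking, but other state (a made-up PFN in bits 51:12)
	 * survives, so the SPTE must still be treated as present. */
	assert(__is_shadow_present_pte(0x1234ULL << 12));

	/* Not-present SPTE carrying only bits 63/62: the shift ignores
	 * them, so it is correctly reported as not present, whereas
	 * "pte != 0" would have called it present. */
	assert(!__is_shadow_present_pte((1ULL << 63) | (1ULL << 62)));

	puts("ok");
	return 0;
}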