@@ -210,13 +210,12 @@ static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
 {
-	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
-
 	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
 		return;
 
 	if (is_accessed_spte(old_spte) &&
-	    (!is_accessed_spte(new_spte) || pfn_changed))
+	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
+	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 }
@@ -444,7 +443,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	if (was_leaf && is_dirty_spte(old_spte) &&
-	    (!is_dirty_spte(new_spte) || pfn_changed))
+	    (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 
 	/*
When updating accessed and dirty bits, check that the new SPTE is present
before attempting to query its A/D bits.  Failure to confirm the SPTE is
present can theoretically cause a false negative, e.g. if a MMIO SPTE
replaces a "real" SPTE and somehow the PFNs magically match.

Realistically, this is all but guaranteed to be a benign bug.  Fix it up
primarily so that a future patch can tweak the MMU_WARN_ON checking A/D
status to fire if the SPTE is not-present.

Fixes: f8e144971c68 ("kvm: x86/mmu: Add access tracking for tdp_mmu")
Cc: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
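For illustration only, not part of the patch: a minimal, self-contained sketch
of why the condition must confirm the new SPTE is present before consulting its
Accessed bit.  The SPTE_PRESENT/SPTE_ACCESSED bit positions, the PFN shift, and
the helper names below are simplified assumptions, not the real
arch/x86/kvm/mmu/spte.h definitions; old_check() and new_check() only model the
old and new conditions from the first hunk.

/*
 * Toy model of the acc_track condition before and after the fix.  All bit
 * layouts and helpers here are assumptions made for the sake of the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT   (1ull << 0)   /* assumed "shadow present" flag */
#define SPTE_ACCESSED  (1ull << 1)   /* assumed Accessed bit          */
#define SPTE_PFN_SHIFT 12            /* assumed PFN position          */

static bool is_present(uint64_t spte)   { return spte & SPTE_PRESENT; }
static bool is_accessed(uint64_t spte)  { return spte & SPTE_ACCESSED; }
static uint64_t to_pfn(uint64_t spte)   { return spte >> SPTE_PFN_SHIFT; }

/* Old condition: trusts the A/D bits of a possibly non-present new SPTE. */
static bool old_check(uint64_t old_spte, uint64_t new_spte)
{
	return is_accessed(old_spte) &&
	       (!is_accessed(new_spte) || to_pfn(old_spte) != to_pfn(new_spte));
}

/* New condition: a non-present new SPTE always counts as "no longer accessed". */
static bool new_check(uint64_t old_spte, uint64_t new_spte)
{
	return is_accessed(old_spte) &&
	       (!is_present(new_spte) || !is_accessed(new_spte) ||
		to_pfn(old_spte) != to_pfn(new_spte));
}

int main(void)
{
	/*
	 * An accessed, present SPTE is replaced by a non-present SPTE whose
	 * "PFN" bits and stale Accessed bit happen to match, i.e. the false
	 * negative described in the commit message.
	 */
	uint64_t old_spte = (0x1234ull << SPTE_PFN_SHIFT) | SPTE_PRESENT | SPTE_ACCESSED;
	uint64_t new_spte = (0x1234ull << SPTE_PFN_SHIFT) | SPTE_ACCESSED;

	printf("old check fires: %d\n", old_check(old_spte, new_spte)); /* 0: missed */
	printf("new check fires: %d\n", new_check(old_spte, new_spte)); /* 1: caught */
	return 0;
}

Running the sketch prints 0 for the old condition and 1 for the new one, i.e.
only the fixed condition reports the old PFN as accessed when the replacement
SPTE is not present but its stale bits happen to line up.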