@@ -267,9 +267,9 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
 	return spte_to_child_sp(root);
 }
 
-static inline bool is_mirror_sptep(u64 *sptep)
+static inline bool is_mirror_sptep(tdp_ptep_t sptep)
 {
-	return is_mirror_sp(sptep_to_sp(sptep));
+	return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
 }
 
 static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
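
A note on the type change above: tdp_ptep_t is the TDP MMU's RCU-annotated SPTE pointer type, so taking it here lets sparse check that callers only dereference the pointer under RCU. A minimal sketch of what the new signature buys, assuming the tdp_ptep_t definition from arch/x86/kvm/mmu/tdp_iter.h (the comments are illustrative, not from the patch):

/* tdp_iter.h: TDP MMU SPTE pointers carry the __rcu address-space tag. */
typedef u64 __rcu *tdp_ptep_t;

static inline bool is_mirror_sptep(tdp_ptep_t sptep)
{
	/*
	 * sptep_to_sp() wants a plain u64 *. rcu_dereference() strips the
	 * __rcu tag and lets sparse verify the access happens inside an
	 * RCU read-side critical section, which is how the TDP MMU walks
	 * its page tables.
	 */
	return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
}
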
@@ -511,7 +511,7 @@ static int __must_check set_external_spte_present(struct kvm *kvm, tdp_ptep_t sp
 	 * page table has been modified. Use FROZEN_SPTE similar to
 	 * the zapping case.
 	 */
-	if (!try_cmpxchg64(sptep, &old_spte, FROZEN_SPTE))
+	if (!try_cmpxchg64(rcu_dereference(sptep), &old_spte, FROZEN_SPTE))
 		return -EBUSY;
 
 	/*
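
For context, the cmpxchg above implements the TDP MMU's freeze pattern: the SPTE is atomically replaced with FROZEN_SPTE so that any concurrent writer's own try_cmpxchg64() fails and backs off with -EBUSY, the external (e.g. TDX) page tables are updated while the SPTE is frozen, and only then is the final value published. A condensed sketch of that shape, with external_pt_update() as a hypothetical stand-in for the real external update and the error path simplified:

static int __must_check frozen_spte_update_sketch(struct kvm *kvm,
						  tdp_ptep_t sptep,
						  u64 old_spte, u64 new_spte)
{
	int ret;

	/* Freeze: racing updaters now lose their cmpxchg and back off. */
	if (!try_cmpxchg64(rcu_dereference(sptep), &old_spte, FROZEN_SPTE))
		return -EBUSY;

	/* Hypothetical stand-in for modifying the external page tables. */
	ret = external_pt_update(kvm, new_spte);

	/* Unfreeze: publish the final SPTE (or restore the old value). */
	__kvm_tdp_mmu_write_spte(sptep, ret ? old_spte : new_spte);
	return ret;
}
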
@@ -637,8 +637,6 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
 						  struct tdp_iter *iter,
 						  u64 new_spte)
 {
-	u64 *sptep = rcu_dereference(iter->sptep);
-
 	/*
 	 * The caller is responsible for ensuring the old SPTE is not a FROZEN
 	 * SPTE. KVM should never attempt to zap or manipulate a FROZEN SPTE,
@@ -662,6 +660,8 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct kvm *kvm,
 		if (ret)
 			return ret;
 	} else {
+		u64 *sptep = rcu_dereference(iter->sptep);
+
 		/*
 		 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs
 		 * and does not hold the mmu_lock. On failure, i.e. if a
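
Taken together, the last two hunks move the rcu_dereference() into the only branch that still needs a plain u64 *: the mirror path now hands the still-annotated iter->sptep to set_external_spte_present(), which performs its own rcu_dereference() at the cmpxchg (second hunk above). A condensed sketch of the resulting control flow in __tdp_mmu_set_spte_atomic(), not the verbatim function body:

	if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(new_spte)) {
		/*
		 * Mirror path: pass the __rcu-annotated pointer through;
		 * set_external_spte_present() dereferences it itself.
		 */
		int ret = set_external_spte_present(kvm, iter->sptep,
						    iter->gfn, iter->old_spte,
						    new_spte, iter->level);
		if (ret)
			return ret;
	} else {
		/* Direct path: the plain pointer is only needed here. */
		u64 *sptep = rcu_dereference(iter->sptep);

		if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
			return -EBUSY;
	}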