--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -196,7 +196,7 @@ writable between reading spte and updating spte. Like below case:
The Dirty bit is lost in this case.
In order to avoid this kind of issue, we always treat the spte as "volatile"
-if it can be updated out of mmu-lock [see spte_has_volatile_bits()]; it means
+if it can be updated out of mmu-lock [see spte_needs_atomic_write()]; it means
the spte is always atomically updated in this case.
3) flush tlbs due to spte updated
@@ -212,7 +212,7 @@ function to update spte (present -> present).
Since the spte is "volatile" if it can be updated out of mmu-lock, we always
atomically update the spte and the race caused by fast page fault can be avoided.
-See the comments in spte_has_volatile_bits() and mmu_spte_update().
+See the comments in spte_needs_atomic_write() and mmu_spte_update().
Lockless Access Tracking:
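As a hedged illustration of the rule described above (plain userspace C with stdatomic, not KVM code; the bit position and function names are invented for the example), the difference between a non-atomic and an atomic SPTE update looks roughly like this:

#include <stdatomic.h>
#include <stdint.h>

#define EXAMPLE_SPTE_DIRTY	(1ULL << 9)	/* hypothetical bit position */

/*
 * Non-atomic read-then-write: if hardware sets the Dirty bit between the
 * load and the store, the store overwrites it and the bit is lost.
 */
static uint64_t example_update_fast(uint64_t *sptep, uint64_t new_spte)
{
	uint64_t old_spte = *sptep;

	*sptep = new_spte;
	return old_spte;
}

/*
 * Atomic exchange: the returned value is exactly what the SPTE held at the
 * moment of the update, so a Dirty bit set concurrently by hardware shows
 * up in old_spte and can be propagated by the caller.
 */
static uint64_t example_update_slow(_Atomic uint64_t *sptep, uint64_t new_spte)
{
	return atomic_exchange(sptep, new_spte);
}

This loosely mirrors the __update_clear_spte_fast()/__update_clear_spte_slow() split in the mmu.c hunks below: the atomic path is taken only when spte_needs_atomic_write() reports that some bit may change out of mmu_lock.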
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -501,7 +501,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
return false;
}
- if (!spte_has_volatile_bits(old_spte))
+ if (!spte_needs_atomic_write(old_spte))
__update_clear_spte_fast(sptep, new_spte);
else
old_spte = __update_clear_spte_slow(sptep, new_spte);
@@ -524,7 +524,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
int level = sptep_to_sp(sptep)->role.level;
if (!is_shadow_present_pte(old_spte) ||
- !spte_has_volatile_bits(old_spte))
+ !spte_needs_atomic_write(old_spte))
__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
else
old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -129,11 +129,12 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
}
/*
- * Returns true if the SPTE has bits that may be set without holding mmu_lock.
- * The caller is responsible for checking if the SPTE is shadow-present, and
- * for determining whether or not the caller cares about non-leaf SPTEs.
+ * Returns true if the SPTE has bits other than the Accessed bit that may be
+ * changed without holding mmu_lock. The caller is responsible for checking if
+ * the SPTE is shadow-present, and for determining whether or not the caller
+ * cares about non-leaf SPTEs.
*/
-bool spte_has_volatile_bits(u64 spte)
+bool spte_needs_atomic_write(u64 spte)
{
if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
return true;
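The first check visible in the hunk above covers SPTEs that are MMU-writable but not currently hardware-writable: as locking.rst describes, the fast page fault path may restore the Writable bit on such an SPTE without mmu_lock, which is only safe via a compare-and-exchange. A minimal, illustrative-only sketch of that pattern (hypothetical names and bit position, plain C, not the actual KVM fast-fault code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_SPTE_WRITABLE	(1ULL << 1)	/* hypothetical bit position */

/*
 * Lockless "restore the Writable bit" fixup: the compare-and-exchange
 * fails (and the fault is simply retried) if the SPTE changed under us,
 * so neither side of the race silently loses the other's update.
 */
static bool example_fast_make_writable(_Atomic uint64_t *sptep, uint64_t old_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte,
					      old_spte | EXAMPLE_SPTE_WRITABLE);
}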
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -519,7 +519,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
return gen;
}
-bool spte_has_volatile_bits(u64 spte);
+bool spte_needs_atomic_write(u64 spte);
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -55,7 +55,7 @@ static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
{
return is_shadow_present_pte(old_spte) &&
is_last_spte(old_spte, level) &&
- spte_has_volatile_bits(old_spte);
+ spte_needs_atomic_write(old_spte);
}
static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
spte_has_volatile_bits() is now a misnomer, as an SPTE can have its
Accessed bit set or cleared without the mmu_lock held, but the state of
the Accessed bit is not checked in spte_has_volatile_bits(). Even if a
caller uses spte_needs_atomic_write(), Accessed bit information may
still be lost, but that is already tolerated, as the TLB is not
invalidated after the Accessed bit is cleared.

Signed-off-by: James Houghton <jthoughton@google.com>
---
 Documentation/virt/kvm/locking.rst | 4 ++--
 arch/x86/kvm/mmu/mmu.c             | 4 ++--
 arch/x86/kvm/mmu/spte.c            | 9 +++++----
 arch/x86/kvm/mmu/spte.h            | 2 +-
 arch/x86/kvm/mmu/tdp_iter.h        | 2 +-
 5 files changed, 11 insertions(+), 10 deletions(-)
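To make the "already tolerated" point concrete, here is a hedged, illustrative-only sketch (hypothetical names and bit position, plain C, not KVM code) of why a lost Accessed-bit update is benign: a non-atomic clear can race with hardware re-setting the bit, but the only consequence is slightly stale aging information, since no TLB invalidation is keyed off the Accessed bit.

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_SPTE_ACCESSED	(1ULL << 8)	/* hypothetical bit position */

/*
 * Hypothetical aging helper: the load and the store are not atomic as a
 * pair, so an Accessed bit set by hardware in between can be lost.  The
 * page is then merely considered idle one aging pass too early; nothing
 * is unmapped or invalidated based on that bit alone, so correctness is
 * unaffected.  A real updater would still consult spte_needs_atomic_write()
 * so that bits such as Dirty are not lost the same way.
 */
static bool example_test_and_clear_accessed(volatile uint64_t *sptep)
{
	uint64_t old_spte = *sptep;

	*sptep = old_spte & ~EXAMPLE_SPTE_ACCESSED;
	return old_spte & EXAMPLE_SPTE_ACCESSED;
}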