@@ -26,6 +26,7 @@
#include <linux/irqbypass.h>
#include <linux/hyperv.h>
#include <linux/kfifo.h>
+#include <linux/atomic.h>
#include <asm/apic.h>
#include <asm/pvclock-abi.h>
@@ -401,7 +402,7 @@ union kvm_cpu_role {
};
struct kvm_rmap_head {
- unsigned long val;
+ atomic_long_t val;
};
struct kvm_pio_request {
@@ -909,11 +909,117 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu
* About rmap_head encoding:
*
* If the bit zero of rmap_head->val is clear, then it points to the only spte
- * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
+ * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
* pte_list_desc containing more mappings.
*/
#define KVM_RMAP_MANY BIT(0)
+/*
+ * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
+ * operates with mmu_lock held for write), but rmaps can be walked without
+ * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
+ * being zapped/dropped _while the rmap is locked_.
+ *
+ * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be
+ * done while holding mmu_lock for write. This allows a task walking rmaps
+ * without holding mmu_lock to concurrently walk the same entries as a task
+ * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify
+ * the rmaps, thus the walks are stable.
+ *
+ * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED,
+ * only the rmap chains themselves are protected. E.g. holding an rmap's lock
+ * ensures all "struct pte_list_desc" fields are stable.
+ */
+#define KVM_RMAP_LOCKED BIT(1)
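+
+/*
+ * Illustrative encodings given the scheme above (sptep and desc are
+ * hypothetical pointers, shown only to make the bit usage concrete):
+ *
+ *	0					- empty rmap (never locked)
+ *	(unsigned long)sptep			- exactly one SPTE
+ *	(unsigned long)desc | KVM_RMAP_MANY	- a pte_list_desc chain
+ *	<either non-zero value> | KVM_RMAP_LOCKED - currently locked
+ */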
+
+static unsigned long kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
+{
+ unsigned long old_val, new_val;
+
+ /*
+ * Elide the lock if the rmap is empty, as lockless walkers (read-only
+ * mode) don't need to (and can't) walk an empty rmap, nor can they add
+ * entries to the rmap. I.e. the only paths that process empty rmaps
+ * do so while holding mmu_lock for write, and are mutually exclusive.
+ */
+ old_val = atomic_long_read(&rmap_head->val);
+ if (!old_val)
+ return 0;
+
+ do {
+ /*
+ * If the rmap is locked, wait for it to be unlocked before
+ * trying to acquire the lock, e.g. to avoid bouncing the cache line.
+ */
+ while (old_val & KVM_RMAP_LOCKED) {
+ cpu_relax();
+ old_val = atomic_long_read(&rmap_head->val);
+ }
+
+ /*
+ * Recheck for an empty rmap, it may have been purged by the
+ * task that held the lock.
+ */
+ if (!old_val)
+ return 0;
+
+ new_val = old_val | KVM_RMAP_LOCKED;
+ /*
+ * Use try_cmpxchg_acquire() to prevent reads and writes to the rmap
+ * from being reordered outside of the critical section created by
+ * kvm_rmap_lock().
+ *
+ * Pairs with the atomic_long_set_release() in kvm_rmap_unlock().
+ *
+ * For the !old_val case, no ordering is needed, as there is no rmap
+ * to walk.
+ */
+ } while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
+
+ /* Return the old value, i.e. _without_ the LOCKED bit set. */
+ return old_val;
+}
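+
+/*
+ * Typical writer-side pairing, e.g. as in pte_list_add() below (sketch only;
+ * per the comment above, modifications require mmu_lock held for write):
+ *
+ *	old_val = kvm_rmap_lock(rmap_head);
+ *	<read/modify the rmap chain based on old_val, computing new_val>
+ *	kvm_rmap_unlock(rmap_head, new_val);
+ */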
+
+static void kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
+ unsigned long new_val)
+{
+ WARN_ON_ONCE(new_val & KVM_RMAP_LOCKED);
+ /*
+ * Ensure that all accesses to the rmap have completed
+ * before we actually unlock the rmap.
+ *
+ * Pairs with the atomic_long_try_cmpxchg_acquire() in kvm_rmap_lock().
+ */
+ atomic_long_set_release(&rmap_head->val, new_val);
+}
+
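+/*
+ * Read the current rmap value with the LOCKED bit masked off, so that
+ * readers, e.g. pte_list_count() and rmap_get_first(), observe a valid
+ * SPTE/pte_list_desc encoding regardless of whether the rmap is locked.
+ */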
+static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head)
+{
+ return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED;
+}
+
+/*
+ * If mmu_lock isn't held, rmaps can only be locked in read-only mode. The actual
+ * locking is the same, but the caller is disallowed from modifying the rmap,
+ * and so the unlock flow is a nop if the rmap is/was empty.
+ */
+__maybe_unused
+static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
+{
+ return kvm_rmap_lock(rmap_head);
+}
+
+__maybe_unused
+static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
+ unsigned long old_val)
+{
+ if (!old_val)
+ return;
+
+ KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
+ atomic_long_set(&rmap_head->val, old_val);
+}
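+
+/*
+ * Sketch of a lockless walk using the read-only lock (hypothetical caller;
+ * this patch adds no users yet, hence the __maybe_unused annotations):
+ *
+ *	unsigned long val = kvm_rmap_lock_readonly(rmap_head);
+ *
+ *	if (val)
+ *		<walk the SPTE or pte_list_desc chain encoded in val,
+ *		 without modifying anything>
+ *	kvm_rmap_unlock_readonly(rmap_head, val);
+ */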
+
/*
* Returns the number of pointers in the rmap chain, not counting the new one.
*/
@@ -924,7 +1030,7 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
struct pte_list_desc *desc;
int count = 0;
- old_val = rmap_head->val;
+ old_val = kvm_rmap_lock(rmap_head);
if (!old_val) {
new_val = (unsigned long)spte;
@@ -956,7 +1062,7 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
desc->sptes[desc->spte_count++] = spte;
}
- rmap_head->val = new_val;
+ kvm_rmap_unlock(rmap_head, new_val);
return count;
}
@@ -1004,7 +1110,7 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
unsigned long rmap_val;
int i;
- rmap_val = rmap_head->val;
+ rmap_val = kvm_rmap_lock(rmap_head);
if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm))
goto out;
@@ -1030,7 +1136,7 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
}
out:
- rmap_head->val = rmap_val;
+ kvm_rmap_unlock(rmap_head, rmap_val);
}
static void kvm_zap_one_rmap_spte(struct kvm *kvm,
@@ -1048,7 +1154,7 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
unsigned long rmap_val;
int i;
- rmap_val = rmap_head->val;
+ rmap_val = kvm_rmap_lock(rmap_head);
if (!rmap_val)
return false;
@@ -1067,13 +1173,13 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
}
out:
/* rmap_head is meaningless now, remember to reset it */
- rmap_head->val = 0;
+ kvm_rmap_unlock(rmap_head, 0);
return true;
}
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{
- unsigned long rmap_val = rmap_head->val;
+ unsigned long rmap_val = kvm_rmap_get(rmap_head);
struct pte_list_desc *desc;
if (!rmap_val)
@@ -1139,7 +1245,7 @@ struct rmap_iterator {
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
struct rmap_iterator *iter)
{
- unsigned long rmap_val = rmap_head->val;
+ unsigned long rmap_val = kvm_rmap_get(rmap_head);
u64 *sptep;
if (!rmap_val)
@@ -1483,7 +1589,7 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
while (++iterator->rmap <= iterator->end_rmap) {
iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
- if (iterator->rmap->val)
+ if (atomic_long_read(&iterator->rmap->val))
return;
}
@@ -2513,7 +2619,8 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
* avoids retaining a large number of stale nested SPs.
*/
if (tdp_enabled && invalid_list &&
- child->role.guest_mode && !child->parent_ptes.val)
+ child->role.guest_mode &&
+ !atomic_long_read(&child->parent_ptes.val))
return kvm_mmu_prepare_zap_page(kvm, child,
invalid_list);
}