@@ -1212,6 +1212,14 @@ struct kvm_arch {
*/
bool memslots_have_rmaps;
+ /*
+ * Set when the KVM mmu needs guest write access page tracking: set
+ * eagerly at VM init when TDP is disabled, or lazily on first shadow
+ * root allocation. Once true, KVM_PAGE_TRACK_WRITE gfn_track arrays
+ * exist for all memslots and must be allocated for any newly created
+ * or modified memslot. Read via smp_load_acquire (see page_track.c).
+ */
+ bool memslots_mmu_write_tracking;
+
#if IS_ENABLED(CONFIG_HYPERV)
hpa_t hv_root_tdp;
spinlock_t hv_root_tdp_lock;
@@ -49,8 +49,11 @@ struct kvm_page_track_notifier_node {
void kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_cleanup(struct kvm *kvm);
+int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm);
+
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
-int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+int kvm_page_track_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages);
void kvm_slot_page_track_add_page(struct kvm *kvm,
@@ -3503,6 +3503,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (r)
return r;
+ r = kvm_page_track_enable_mmu_write_tracking(vcpu->kvm);
+ if (r)
+ return r;
+
write_lock(&vcpu->kvm->mmu_lock);
r = make_mmu_pages_available(vcpu);
if (r < 0)
@@ -5699,6 +5703,9 @@ void kvm_mmu_init_vm(struct kvm *kvm)
*/
kvm->arch.memslots_have_rmaps = true;
+ if (!tdp_enabled)
+ kvm->arch.memslots_mmu_write_tracking = true;
+
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
@@ -19,6 +19,16 @@
#include "mmu.h"
#include "mmu_internal.h"
+static bool write_tracking_enabled(struct kvm *kvm)
+{
+ /*
+ * Acquire pairs with smp_store_release in enable_mmu_write_tracking:
+ * the gfn_track pointers are guaranteed visible whenever this is true.
+ */
+ return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
+ smp_load_acquire(&kvm->arch.memslots_mmu_write_tracking);
+}
+
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
int i;
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
int i;
@@ -29,12 +39,16 @@ void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
}
}
-int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+int kvm_page_track_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages)
{
- int i;
+ int i;
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
+ if (i == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(kvm))
+ continue;
+
slot->arch.gfn_track[i] =
kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
GFP_KERNEL_ACCOUNT);
@@ -57,6 +71,64 @@ static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
return true;
}
+/*
+ * Lazily enable guest write access page tracking: allocate the
+ * KVM_PAGE_TRACK_WRITE gfn_track array for every existing memslot,
+ * then publish memslots_mmu_write_tracking so that newly created
+ * memslots allocate it as well.
+ *
+ * Returns 0 on success (including when tracking was already enabled),
+ * -ENOMEM if an allocation fails; safe to retry in that case.
+ */
+int kvm_page_track_enable_mmu_write_tracking(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
+ unsigned short **gfn_track;
+ int i;
+
+ if (write_tracking_enabled(kvm))
+ return 0;
+
+ mutex_lock(&kvm->slots_arch_lock);
+
+ /* Recheck under the lock; another caller may have won the race. */
+ if (write_tracking_enabled(kvm)) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+ }
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(slot, slots) {
+ gfn_track = slot->arch.gfn_track + KVM_PAGE_TRACK_WRITE;
+
+ /* Left over from a previous failed attempt; don't leak it. */
+ if (*gfn_track)
+ continue;
+
+ /*
+ * Element size (unsigned short), not pointer size:
+ * one counter per gfn in the slot.
+ */
+ *gfn_track = kvcalloc(slot->npages, sizeof(**gfn_track),
+ GFP_KERNEL_ACCOUNT);
+ if (*gfn_track == NULL) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ /*
+ * Ensure that memslots_mmu_write_tracking becomes true strictly
+ * after all the pointers are set.
+ */
+ smp_store_release(&kvm->arch.memslots_mmu_write_tracking, true);
+ mutex_unlock(&kvm->slots_arch_lock);
+
+ return 0;
+}
+
static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode, short count)
{
@@ -92,6 +146,10 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
if (WARN_ON(!page_track_mode_is_valid(mode)))
return;
+ if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
+ !write_tracking_enabled(kvm)))
+ return;
+
update_gfn_track(slot, gfn, mode, 1);
/*
@@ -126,6 +184,10 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
if (WARN_ON(!page_track_mode_is_valid(mode)))
return;
+ if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
+ !write_tracking_enabled(kvm)))
+ return;
+
update_gfn_track(slot, gfn, mode, -1);
/*
@@ -148,6 +210,9 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
if (WARN_ON(!page_track_mode_is_valid(mode)))
return false;
+ if (mode == KVM_PAGE_TRACK_WRITE && !write_tracking_enabled(vcpu->kvm))
+ return false;
+
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
if (!slot)
return false;
@@ -11474,7 +11474,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
}
}
- if (kvm_page_track_create_memslot(slot, npages))
+ if (kvm_page_track_create_memslot(kvm, slot, npages))
goto out_free;
return 0;