@@ -18,6 +18,7 @@ KVM_X86_OP_NULL(hardware_unsetup)
KVM_X86_OP_NULL(cpu_has_accelerated_tpr)
KVM_X86_OP(has_emulated_msr)
KVM_X86_OP(vcpu_after_set_cpuid)
+KVM_X86_OP(nested_enabled)
KVM_X86_OP(vm_init)
KVM_X86_OP_NULL(vm_destroy)
KVM_X86_OP(vcpu_create)
@@ -1049,6 +1049,7 @@ struct kvm_arch {
struct list_head lpage_disallowed_mmu_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head;
+ bool mmu_uses_page_tracking;
/*
* Protects marking pages unsync during page faults, as TDP MMU page
* faults only take mmu_lock for read. For simplicity, the unsync
@@ -1302,6 +1303,7 @@ struct kvm_x86_ops {
bool (*cpu_has_accelerated_tpr)(void);
bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
+ bool (*nested_enabled)(void);
unsigned int vm_size;
int (*vm_init)(struct kvm *kvm);
@@ -46,11 +46,12 @@ struct kvm_page_track_notifier_node {
struct kvm_page_track_notifier_node *node);
};
-void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_init(struct kvm *kvm, bool mmu_uses_page_tracking);
void kvm_page_track_cleanup(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
-int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+int kvm_page_track_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages);
void kvm_slot_page_track_add_page(struct kvm *kvm,
@@ -29,11 +29,16 @@ void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
}
}
-int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+int kvm_page_track_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages)
{
int i;
+ if (!IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) &&
+ !kvm->arch.mmu_uses_page_tracking)
+ return 0;
+
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
slot->arch.gfn_track[i] =
kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
@@ -61,10 +66,15 @@ static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode, short count)
{
int index, val;
+ unsigned short *arr;
+
+ arr = slot->arch.gfn_track[mode];
+ if (WARN_ON(!arr))
+ return;
index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
- val = slot->arch.gfn_track[mode][index];
+ val = arr[index];
if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
return;
@@ -143,6 +153,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
struct kvm_memory_slot *slot;
+ unsigned short *arr;
int index;
if (WARN_ON(!page_track_mode_is_valid(mode)))
@@ -152,8 +163,12 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
if (!slot)
return false;
+ arr = slot->arch.gfn_track[mode];
+ if (!arr)
+ return false;
+
index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
- return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
+ return !!READ_ONCE(arr[index]);
}
void kvm_page_track_cleanup(struct kvm *kvm)
@@ -164,13 +179,15 @@ void kvm_page_track_cleanup(struct kvm *kvm)
cleanup_srcu_struct(&head->track_srcu);
}
-void kvm_page_track_init(struct kvm *kvm)
+void kvm_page_track_init(struct kvm *kvm, bool mmu_uses_page_tracking)
{
struct kvm_page_track_notifier_head *head;
head = &kvm->arch.track_notifier_head;
init_srcu_struct(&head->track_srcu);
INIT_HLIST_HEAD(&head->track_notifier_list);
+
+ kvm->arch.mmu_uses_page_tracking = mmu_uses_page_tracking;
}
/*
@@ -4049,6 +4049,11 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
}
}
+static bool svm_nested_enabled(void)
+{
+ return nested;
+}
+
static bool svm_has_wbinvd_exit(void)
{
return true;
@@ -4590,6 +4595,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.get_exit_info = svm_get_exit_info,
.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
+ .nested_enabled = svm_nested_enabled,
.has_wbinvd_exit = svm_has_wbinvd_exit,
@@ -7225,6 +7225,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx_update_exception_bitmap(vcpu);
}
+static bool vmx_nested_enabled(void)
+{
+ return nested;
+}
+
static __init void vmx_set_cpu_caps(void)
{
kvm_set_cpu_caps();
@@ -7637,6 +7642,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.get_exit_info = vmx_get_exit_info,
.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
+ .nested_enabled = vmx_nested_enabled,
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
@@ -11174,7 +11174,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
- kvm_page_track_init(kvm);
+	kvm_page_track_init(kvm, !tdp_enabled ||
+				 static_call(kvm_x86_nested_enabled)());
kvm_mmu_init_vm(kvm);
kvm_xen_init_vm(kvm);
@@ -11474,7 +11475,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
}
}
- if (kvm_page_track_create_memslot(slot, npages))
+ if (kvm_page_track_create_memslot(kvm, slot, npages))
goto out_free;
return 0;