@@ -6144,6 +6144,15 @@ static void __set_nx_huge_pages(bool val)
 	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
 }
 
+static void kvm_update_nx_huge_pages(struct kvm *kvm)
+{
+	mutex_lock(&kvm->slots_lock);
+	kvm_mmu_zap_all_fast(kvm);
+	mutex_unlock(&kvm->slots_lock);
+
+	wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+}
+
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 {
 	bool old_val = nx_huge_pages;
@@ -6166,13 +6175,9 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 
 		mutex_lock(&kvm_lock);
 
-		list_for_each_entry(kvm, &vm_list, vm_list) {
-			mutex_lock(&kvm->slots_lock);
-			kvm_mmu_zap_all_fast(kvm);
-			mutex_unlock(&kvm->slots_lock);
+		list_for_each_entry(kvm, &vm_list, vm_list)
+			kvm_update_nx_huge_pages(kvm);
 
-			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
-		}
 		mutex_unlock(&kvm_lock);
 	}
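
To make the end state easier to read than the interleaved +/- lines, here is a sketch of how the update loop in set_nx_huge_pages() reads once the hunks above are applied. It is an excerpt of arch/x86/kvm/mmu/mmu.c, not a standalone compilable unit, and the enclosing "if (new_val != old_val)" block and the "struct kvm *kvm;" declaration are assumed from the surrounding, unchanged code rather than shown in the hunks:

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		/*
		 * Zap and wake each VM via the new helper; it takes
		 * kvm->slots_lock itself, so only kvm_lock is held here.
		 */
		list_for_each_entry(kvm, &vm_list, vm_list)
			kvm_update_nx_huge_pages(kvm);

		mutex_unlock(&kvm_lock);
	}

The per-VM zap-and-wake sequence is unchanged; it has simply moved into kvm_update_nx_huge_pages(), so the lock ordering (kvm_lock taken by the caller, then kvm->slots_lock inside the helper) is the same as before the refactor.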