
[v5] KVM: Check the allocation of pv cpu mask

Message ID 20201103085227.25098-1-lihaiwei.kernel@gmail.com (mailing list archive)
State New, archived
Series [v5] KVM: Check the allocation of pv cpu mask

Commit Message

Haiwei Li Nov. 3, 2020, 8:52 a.m. UTC
From: Haiwei Li <lihaiwei@tencent.com>

Both 'kvm_send_ipi_mask_allbutself' and 'kvm_flush_tlb_others' use the
per-cpu __pv_cpu_mask. Initialize the PV IPI ops only if the allocation
succeeds, and check the cpumask in 'kvm_flush_tlb_others' before using it.
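
In short, the flush path now falls back to the native implementation when
the per-cpu mask was never allocated. A condensed sketch of that check
(mirroring the hunk below, not a separate interface):

	static void kvm_flush_tlb_others(const struct cpumask *cpumask,
				const struct flush_tlb_info *info)
	{
		struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

		/* Boot-time allocation failed: use the native flush instead. */
		if (unlikely(!flushmask)) {
			native_flush_tlb_others(cpumask, info);
			return;
		}

		/* ... PV flush using flushmask, unchanged ... */
	}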

Thanks to Vitaly Kuznetsov for his tireless advice.

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
---
v1 -> v2:
 * add a CONFIG_SMP guard for kvm_send_ipi_mask_allbutself to prevent a build error
v2 -> v3:
 * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
v3 -> v4:
 * move kvm_setup_pv_ipi into kvm_alloc_cpumask and get rid of kvm_apic_init
v4 -> v5:
 * remove kvm_apic_init since it is now an empty function
 * define pv_ipi_supported() as 'false' in the !CONFIG_SMP case to get rid of
 the 'alloc' variable
 * move kvm_setup_pv_ipi out of the CONFIG_SMP block and guard its body with
 CONFIG_SMP

 arch/x86/kernel/kvm.c | 75 +++++++++++++++++++++++++------------------
 1 file changed, 44 insertions(+), 31 deletions(-)

Patch

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 42c6e0deff9e..2f2cc25d5078 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -547,16 +547,6 @@  static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 	__send_ipi_mask(local_mask, vector);
 }
 
-/*
- * Set the IPI entry points
- */
-static void kvm_setup_pv_ipi(void)
-{
-	apic->send_IPI_mask = kvm_send_ipi_mask;
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
-	pr_info("setup PV IPIs\n");
-}
-
 static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 {
 	int cpu;
@@ -609,7 +599,24 @@  static int kvm_cpu_down_prepare(unsigned int cpu)
 	local_irq_enable();
 	return 0;
 }
+#else
+static bool pv_ipi_supported(void)
+{
+	return false;
+}
+#endif
+
+/*
+ * Set the IPI entry points
+ */
+static void kvm_setup_pv_ipi(void)
+{
+#if defined(CONFIG_SMP)
+	apic->send_IPI_mask = kvm_send_ipi_mask;
+	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+	pr_info("setup PV IPIs\n");
 #endif
+}
 
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 			const struct flush_tlb_info *info)
@@ -619,6 +626,11 @@  static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	struct kvm_steal_time *src;
 	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+	if (unlikely(!flushmask)) {
+		native_flush_tlb_others(cpumask, info);
+		return;
+	}
+
 	cpumask_copy(flushmask, cpumask);
 	/*
 	 * We have to call flush only on online vCPUs. And
@@ -730,18 +742,9 @@  static uint32_t __init kvm_detect(void)
 	return kvm_cpuid_base();
 }
 
-static void __init kvm_apic_init(void)
-{
-#if defined(CONFIG_SMP)
-	if (pv_ipi_supported())
-		kvm_setup_pv_ipi();
-#endif
-}
-
 static void __init kvm_init_platform(void)
 {
 	kvmclock_init();
-	x86_platform.apic_post_init = kvm_apic_init;
 }
 
 const __initconst struct hypervisor_x86 x86_hyper_kvm = {
@@ -765,29 +768,39 @@  static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
+static void kvm_free_cpumask(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
-	bool alloc = false;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
 
-	if (pv_tlb_flush_supported())
-		alloc = true;
-
-#if defined(CONFIG_SMP)
-	if (pv_ipi_supported())
-		alloc = true;
-#endif
-
-	if (alloc)
+	if (pv_tlb_flush_supported() || pv_ipi_supported()) {
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+			if (!zalloc_cpumask_var_node(
+				per_cpu_ptr(&__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu))) {
+				goto zalloc_cpumask_fail;
+			}
 		}
+	}
+
+	if (pv_ipi_supported())
+		kvm_setup_pv_ipi();
 
 	return 0;
+
+zalloc_cpumask_fail:
+	kvm_free_cpumask();
+	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);