
x86/kvm: Don't alloc __pv_cpu_mask when !CONFIG_SMP

Message ID 1617785588-18722-1-git-send-email-wanpengli@tencent.com (mailing list archive)
State New, archived
Series x86/kvm: Don't alloc __pv_cpu_mask when !CONFIG_SMP

Commit Message

Wanpeng Li April 7, 2021, 8:53 a.m. UTC
From: Wanpeng Li <wanpengli@tencent.com>

Enabling PV TLB shootdown when !CONFIG_SMP doesn't make sense. Let's move
it inside CONFIG_SMP. In addition, we can avoid allocating __pv_cpu_mask
when !CONFIG_SMP and get rid of the 'alloc' variable in kvm_alloc_cpumask.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
---
 arch/x86/kernel/kvm.c | 79 +++++++++++++++++++++++++--------------------------
 1 file changed, 39 insertions(+), 40 deletions(-)
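
The !CONFIG_SMP side of the change is the usual stub pattern; a minimal
sketch of it for illustration (simplified and hypothetical; in the actual
patch the SMP-side helper also checks the realtime hint and the steal-time
feature):

#ifdef CONFIG_SMP
/* SMP build: real detection against the PV feature bits. */
static bool pv_tlb_flush_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH);
}
#else
/*
 * UP build: a constant-false stub keeps the callers compiling
 * unchanged, the compiler drops the dead SMP-only paths, and
 * __pv_cpu_mask is never allocated.
 */
static bool pv_tlb_flush_supported(void) { return false; }
#endif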

Comments

Sean Christopherson April 8, 2021, 8:20 p.m. UTC | #1
On Wed, Apr 07, 2021, Wanpeng Li wrote:
> From: Wanpeng Li <wanpengli@tencent.com>
> 
> Enabling PV TLB shootdown when !CONFIG_SMP doesn't make sense. Let's move
> it inside CONFIG_SMP. In addition, we can avoid allocating __pv_cpu_mask
> when !CONFIG_SMP and get rid of the 'alloc' variable in kvm_alloc_cpumask.

...

> +static bool pv_tlb_flush_supported(void) { return false; }
> +static bool pv_ipi_supported(void) { return false; }
> +static void kvm_flush_tlb_others(const struct cpumask *cpumask,
> +			const struct flush_tlb_info *info) { }
> +static void kvm_setup_pv_ipi(void) { }

If you shuffle things around a bit more, you can avoid these stubs, and hide the
definition of __pv_cpu_mask behind CONFIG_SMP, too.


diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5e78e01ca3b4..13c6b1c7c01b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -451,6 +451,8 @@ static void __init sev_map_percpu_data(void)
        }
 }

+#ifdef CONFIG_SMP
+
 static bool pv_tlb_flush_supported(void)
 {
        return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
@@ -460,8 +462,6 @@ static bool pv_tlb_flush_supported(void)

 static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

-#ifdef CONFIG_SMP
-
 static bool pv_ipi_supported(void)
 {
        return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
@@ -574,45 +574,6 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
        }
 }

-static void __init kvm_smp_prepare_boot_cpu(void)
-{
-       /*
-        * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
-        * shares the guest physical address with the hypervisor.
-        */
-       sev_map_percpu_data();
-
-       kvm_guest_cpu_init();
-       native_smp_prepare_boot_cpu();
-       kvm_spinlock_init();
-}
-
-static void kvm_guest_cpu_offline(void)
-{
-       kvm_disable_steal_time();
-       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-               wrmsrl(MSR_KVM_PV_EOI_EN, 0);
-       kvm_pv_disable_apf();
-       apf_task_wake_all();
-}
-
-static int kvm_cpu_online(unsigned int cpu)
-{
-       local_irq_disable();
-       kvm_guest_cpu_init();
-       local_irq_enable();
-       return 0;
-}
-
-static int kvm_cpu_down_prepare(unsigned int cpu)
-{
-       local_irq_disable();
-       kvm_guest_cpu_offline();
-       local_irq_enable();
-       return 0;
-}
-#endif
-
 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
 {
@@ -639,6 +600,63 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
        native_flush_tlb_others(flushmask, info);
 }

+static void __init kvm_smp_prepare_boot_cpu(void)
+{
+       /*
+        * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
+        * shares the guest physical address with the hypervisor.
+        */
+       sev_map_percpu_data();
+
+       kvm_guest_cpu_init();
+       native_smp_prepare_boot_cpu();
+       kvm_spinlock_init();
+}
+
+static void kvm_guest_cpu_offline(void)
+{
+       kvm_disable_steal_time();
+       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+               wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+       kvm_pv_disable_apf();
+       apf_task_wake_all();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+       local_irq_disable();
+       kvm_guest_cpu_init();
+       local_irq_enable();
+       return 0;
+}
+
+static int kvm_cpu_down_prepare(unsigned int cpu)
+{
+       local_irq_disable();
+       kvm_guest_cpu_offline();
+       local_irq_enable();
+       return 0;
+}
+
+static __init int kvm_alloc_cpumask(void)
+{
+       int cpu;
+
+       if (!kvm_para_available() || nopv)
+               return 0;
+
+       if (pv_tlb_flush_supported() || pv_ipi_supported())
+               for_each_possible_cpu(cpu) {
+                       zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+                               GFP_KERNEL, cpu_to_node(cpu));
+               }
+
+       return 0;
+}
+arch_initcall(kvm_alloc_cpumask);
+
+#endif
+
 static void __init kvm_guest_init(void)
 {
        int i;
@@ -653,21 +671,21 @@ static void __init kvm_guest_init(void)
                pv_ops.time.steal_clock = kvm_steal_clock;
        }

+       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+               apic_set_eoi_write(kvm_guest_apic_eoi_write);
+
+       if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
+               static_branch_enable(&kvm_async_pf_enabled);
+               alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
+       }
+
+#ifdef CONFIG_SMP
        if (pv_tlb_flush_supported()) {
                pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
                pv_ops.mmu.tlb_remove_table = tlb_remove_table;
                pr_info("KVM setup pv remote TLB flush\n");
        }

-       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
-               apic_set_eoi_write(kvm_guest_apic_eoi_write);
-
-       if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
-               static_branch_enable(&kvm_async_pf_enabled);
-               alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
-       }
-
-#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (pv_sched_yield_supported()) {
                smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
@@ -734,7 +752,7 @@ static uint32_t __init kvm_detect(void)

 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        if (pv_ipi_supported())
                kvm_setup_pv_ipi();
 #endif
@@ -794,31 +812,6 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);

-static __init int kvm_alloc_cpumask(void)
-{
-       int cpu;
-       bool alloc = false;
-
-       if (!kvm_para_available() || nopv)
-               return 0;
-
-       if (pv_tlb_flush_supported())
-               alloc = true;
-
-#if defined(CONFIG_SMP)
-       if (pv_ipi_supported())
-               alloc = true;
-#endif
-
-       if (alloc)
-               for_each_possible_cpu(cpu) {
-                       zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-                               GFP_KERNEL, cpu_to_node(cpu));
-               }
-
-       return 0;
-}
-arch_initcall(kvm_alloc_cpumask);

 #ifdef CONFIG_PARAVIRT_SPINLOCKS
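
For reference, the end state of this reshuffle is a single CONFIG_SMP block
covering the per-cpu mask and all of its consumers (a structural outline
only; the bodies are as in the hunks above):

#ifdef CONFIG_SMP

static bool pv_tlb_flush_supported(void);
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
static bool pv_ipi_supported(void);
static void kvm_setup_pv_ipi(void);
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
				 const struct flush_tlb_info *info);
static void __init kvm_smp_prepare_boot_cpu(void);
static int kvm_cpu_online(unsigned int cpu);
static int kvm_cpu_down_prepare(unsigned int cpu);
static __init int kvm_alloc_cpumask(void);
arch_initcall(kvm_alloc_cpumask);

#endif /* CONFIG_SMP */

The call sites in kvm_guest_init() and kvm_apic_init() then sit behind
#ifdef CONFIG_SMP as well, so no !CONFIG_SMP stubs are needed.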
Wanpeng Li April 9, 2021, 3:03 a.m. UTC | #2
On Fri, 9 Apr 2021 at 04:20, Sean Christopherson <seanjc@google.com> wrote:
>
> On Wed, Apr 07, 2021, Wanpeng Li wrote:
> > From: Wanpeng Li <wanpengli@tencent.com>
> >
> > Enabling PV TLB shootdown when !CONFIG_SMP doesn't make sense. Let's move
> > it inside CONFIG_SMP. In addition, we can avoid allocating __pv_cpu_mask
> > when !CONFIG_SMP and get rid of the 'alloc' variable in kvm_alloc_cpumask.
>
> ...
>
> > +static bool pv_tlb_flush_supported(void) { return false; }
> > +static bool pv_ipi_supported(void) { return false; }
> > +static void kvm_flush_tlb_others(const struct cpumask *cpumask,
> > +                     const struct flush_tlb_info *info) { }
> > +static void kvm_setup_pv_ipi(void) { }
>
> If you shuffle things around a bit more, you can avoid these stubs, and hide the
> definition of __pv_cpu_mask behind CONFIG_SMP, too.

Thanks, I will move things around.

    Wanpeng

Patch

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5e78e01..202e1f0 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -451,6 +451,11 @@  static void __init sev_map_percpu_data(void)
 	}
 }
 
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
+#ifdef CONFIG_SMP
+
 static bool pv_tlb_flush_supported(void)
 {
 	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
@@ -458,10 +463,6 @@  static bool pv_tlb_flush_supported(void)
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
-
-#ifdef CONFIG_SMP
-
 static bool pv_ipi_supported(void)
 {
 	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
@@ -574,6 +575,32 @@  static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 	}
 }
 
+static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+			const struct flush_tlb_info *info)
+{
+	u8 state;
+	int cpu;
+	struct kvm_steal_time *src;
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+
+	cpumask_copy(flushmask, cpumask);
+	/*
+	 * We have to call flush only on online vCPUs. And
+	 * queue flush_on_enter for pre-empted vCPUs
+	 */
+	for_each_cpu(cpu, flushmask) {
+		src = &per_cpu(steal_time, cpu);
+		state = READ_ONCE(src->preempted);
+		if ((state & KVM_VCPU_PREEMPTED)) {
+			if (try_cmpxchg(&src->preempted, &state,
+					state | KVM_VCPU_FLUSH_TLB))
+				__cpumask_clear_cpu(cpu, flushmask);
+		}
+	}
+
+	native_flush_tlb_others(flushmask, info);
+}
+
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	/*
@@ -611,33 +638,16 @@  static int kvm_cpu_down_prepare(unsigned int cpu)
 	local_irq_enable();
 	return 0;
 }
-#endif
 
-static void kvm_flush_tlb_others(const struct cpumask *cpumask,
-			const struct flush_tlb_info *info)
-{
-	u8 state;
-	int cpu;
-	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+#else
 
-	cpumask_copy(flushmask, cpumask);
-	/*
-	 * We have to call flush only on online vCPUs. And
-	 * queue flush_on_enter for pre-empted vCPUs
-	 */
-	for_each_cpu(cpu, flushmask) {
-		src = &per_cpu(steal_time, cpu);
-		state = READ_ONCE(src->preempted);
-		if ((state & KVM_VCPU_PREEMPTED)) {
-			if (try_cmpxchg(&src->preempted, &state,
-					state | KVM_VCPU_FLUSH_TLB))
-				__cpumask_clear_cpu(cpu, flushmask);
-		}
-	}
+static bool pv_tlb_flush_supported(void) { return false; }
+static bool pv_ipi_supported(void) { return false; }
+static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+			const struct flush_tlb_info *info) { }
+static void kvm_setup_pv_ipi(void) { }
 
-	native_flush_tlb_others(flushmask, info);
-}
+#endif
 
 static void __init kvm_guest_init(void)
 {
@@ -734,10 +744,8 @@  static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
 	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
-#endif
 }
 
 static bool __init kvm_msi_ext_dest_id(void)
@@ -797,20 +805,11 @@  arch_initcall(activate_jump_labels);
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
-	bool alloc = false;
 
 	if (!kvm_para_available() || nopv)
 		return 0;
 
-	if (pv_tlb_flush_supported())
-		alloc = true;
-
-#if defined(CONFIG_SMP)
-	if (pv_ipi_supported())
-		alloc = true;
-#endif
-
-	if (alloc)
+	if (pv_tlb_flush_supported() || pv_ipi_supported())
 		for_each_possible_cpu(cpu) {
 			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
 				GFP_KERNEL, cpu_to_node(cpu));
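
A quick way to compile-test both configurations from the top of the tree
(standard kbuild commands; assumes the guest-support options stay enabled
so kvm.o is built at all):

$ make defconfig
$ ./scripts/config --enable HYPERVISOR_GUEST --enable KVM_GUEST --disable SMP
$ make olddefconfig
$ make arch/x86/kernel/kvm.o    # UP build, stubs compiled
$ ./scripts/config --enable SMP
$ make olddefconfig
$ make arch/x86/kernel/kvm.o    # SMP build, real helpers compiled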