
[v3] KVM: Check the allocation of pv cpu mask

Message ID 20200925180738.4426-1-lihaiwei.kernel@gmail.com (mailing list archive)
State New, archived
Series [v3] KVM: Check the allocation of pv cpu mask

Commit Message

Haiwei Li Sept. 25, 2020, 6:07 p.m. UTC
From: Haiwei Li <lihaiwei@tencent.com>

Check the allocation of the per-cpu __pv_cpu_mask and fall back to the
native functions if it fails.

Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
---
v1 -> v2:
 * add CONFIG_SMP guard for kvm_send_ipi_mask_allbutself to prevent a build error
v2 -> v3:
 * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others

 arch/x86/kernel/kvm.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

Comments

Vitaly Kuznetsov Sept. 29, 2020, 11:40 a.m. UTC | #1
lihaiwei.kernel@gmail.com writes:

> From: Haiwei Li <lihaiwei@tencent.com>
>
> Check the allocation of the per-cpu __pv_cpu_mask and fall back to the
> native functions if it fails.
>
> Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> Signed-off-by: Haiwei Li <lihaiwei@tencent.com>
> ---
> v1 -> v2:
>  * add CONFIG_SMP guard for kvm_send_ipi_mask_allbutself to prevent a build error
> v2 -> v3:
>  * always check the allocation of __pv_cpu_mask in kvm_flush_tlb_others
>
>  arch/x86/kernel/kvm.c | 27 ++++++++++++++++++++++++---
>  1 file changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 9663ba31347c..1e5da6db519c 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -553,7 +553,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
>  static void kvm_setup_pv_ipi(void)
>  {
>  	apic->send_IPI_mask = kvm_send_ipi_mask;
> -	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;

I see that kvm_send_ipi_mask_allbutself() uses the per-CPU __pv_cpu_mask
and kvm_send_ipi_mask() doesn't, but assigning send_IPI_mask here and
send_IPI_mask_allbutself in kvm_alloc_cpumask() looks weird. Personally,
I'd prefer to move the apic->send_IPI_mask assignment to
kvm_alloc_cpumask() too (probably by calling kvm_setup_pv_ipi() from
there and getting rid of kvm_apic_init() completely).
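
Roughly like this (an untested sketch only; it reuses the existing
pv_ipi_supported()/pv_tlb_flush_supported() helpers and assumes
kvm_setup_pv_ipi() keeps setting both callbacks):

static __init int kvm_alloc_cpumask(void)
{
	int cpu;
	bool alloc = false;

#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		alloc = true;
#endif

	if (pv_tlb_flush_supported())
		alloc = true;

	if (!alloc)
		return 0;

	for_each_possible_cpu(cpu) {
		if (!zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
					     GFP_KERNEL, cpu_to_node(cpu)))
			goto fail;
	}

#if defined(CONFIG_SMP)
	/* Masks are allocated, now it is safe to install the PV IPI hooks. */
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
	return 0;

fail:
	kvm_free_cpumask();
	return -ENOMEM;
}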

Alternatively, we can save the original apic->send_IPI_mask_allbutself
value to a variable and call it from kvm_send_ipi_mask_allbutself() when
__pv_cpu_mask wasn't allocated.
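
Something like this (again untested; 'orig_send_ipi_mask_allbutself' is
just a made-up name for the saved callback):

static void (*orig_send_ipi_mask_allbutself)(const struct cpumask *mask,
					     int vector);

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	if (unlikely(!new_mask)) {
		/* __pv_cpu_mask wasn't allocated, use the saved callback. */
		orig_send_ipi_mask_allbutself(mask, vector);
		return;
	}

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	kvm_send_ipi_mask(new_mask, vector);
}

with the original value saved in kvm_setup_pv_ipi() before the callback
is overridden:

	orig_send_ipi_mask_allbutself = apic->send_IPI_mask_allbutself;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;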

>  	pr_info("setup PV IPIs\n");
>  }
>  
> @@ -619,6 +618,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
>  	struct kvm_steal_time *src;
>  	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
>  
> +	if (unlikely(!flushmask)) {
> +		native_flush_tlb_others(cpumask, info);
> +		return;
> +	}
> +
>  	cpumask_copy(flushmask, cpumask);
>  	/*
>  	 * We have to call flush only on online vCPUs. And
> @@ -765,6 +769,14 @@ static __init int activate_jump_labels(void)
>  }
>  arch_initcall(activate_jump_labels);
>  
> +static void kvm_free_cpumask(void)
> +{
> +	unsigned int cpu;
> +
> +	for_each_possible_cpu(cpu)
> +		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
> +}
> +
>  static __init int kvm_alloc_cpumask(void)
>  {
>  	int cpu;
> @@ -783,11 +795,20 @@ static __init int kvm_alloc_cpumask(void)
>  
>  	if (alloc)
>  		for_each_possible_cpu(cpu) {
> -			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
> -				GFP_KERNEL, cpu_to_node(cpu));
> +			if (!zalloc_cpumask_var_node(
> +				per_cpu_ptr(&__pv_cpu_mask, cpu),
> +				GFP_KERNEL, cpu_to_node(cpu)))
> +				goto zalloc_cpumask_fail;
>  		}
>  
> +#if defined(CONFIG_SMP)
> +	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
> +#endif
>  	return 0;
> +
> +zalloc_cpumask_fail:
> +	kvm_free_cpumask();
> +	return -ENOMEM;
>  }
>  arch_initcall(kvm_alloc_cpumask);

Patch

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9663ba31347c..1e5da6db519c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -553,7 +553,6 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 static void kvm_setup_pv_ipi(void)
 {
 	apic->send_IPI_mask = kvm_send_ipi_mask;
-	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
 	pr_info("setup PV IPIs\n");
 }
 
@@ -619,6 +618,11 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 	struct kvm_steal_time *src;
 	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 
+	if (unlikely(!flushmask)) {
+		native_flush_tlb_others(cpumask, info);
+		return;
+	}
+
 	cpumask_copy(flushmask, cpumask);
 	/*
 	 * We have to call flush only on online vCPUs. And
@@ -765,6 +769,14 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
+static void kvm_free_cpumask(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
 static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
@@ -783,11 +795,20 @@ static __init int kvm_alloc_cpumask(void)
 
 	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+			if (!zalloc_cpumask_var_node(
+				per_cpu_ptr(&__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu)))
+				goto zalloc_cpumask_fail;
 		}
 
+#if defined(CONFIG_SMP)
+	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+#endif
 	return 0;
+
+zalloc_cpumask_fail:
+	kvm_free_cpumask();
+	return -ENOMEM;
 }
 arch_initcall(kvm_alloc_cpumask);