
[2/2,V2] KVM: VMX: update vcpu posted-interrupt descriptor when assigning device

Message ID 20210506192125.GA350334@fuller.cnet (mailing list archive)
State New, archived

Commit Message

Marcelo Tosatti May 6, 2021, 7:21 p.m. UTC
For VMX, when a vcpu enters HLT emulation, pi_pre_block will:

1) Add vcpu to per-cpu list of blocked vcpus.

2) Program the posted-interrupt descriptor "notification vector" 
to POSTED_INTR_WAKEUP_VECTOR

With interrupt remapping, a device interrupt will set the bit in the
PIR corresponding to the vector programmed for the device, test-and-set
the ON bit of the posted-interrupt descriptor, and, if ON was previously
clear, generate an interrupt for the notification vector.

This way, the target pCPU is woken by the device interrupt and in turn
wakes up the blocked vcpu.
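
In pseudocode, the posting operation performed by hardware is roughly
the following (illustrative sketch, not KVM code; the pi_desc fields
follow struct pi_desc in arch/x86/kvm/vmx/posted_intr.h, and
send_physical_ipi() is a stand-in for the actual IPI delivery):

/*
 * Sketch of hardware interrupt posting, for illustration only.
 */
static void hw_post_interrupt(struct pi_desc *pi_desc, u8 vector)
{
	/* 1) Set the PIR bit for the vector assigned to the device. */
	set_bit(vector, (unsigned long *)pi_desc->pir);

	/* 2) Test-and-set the ON (outstanding notification) bit. */
	if (test_and_set_bit(POSTED_INTR_ON,
			     (unsigned long *)&pi_desc->control))
		return;	/* a notification is already outstanding */

	/*
	 * 3) ON was previously clear: interrupt the target CPU with the
	 *    programmed notification vector, e.g.
	 *    POSTED_INTR_WAKEUP_VECTOR for a blocked vcpu.
	 */
	send_physical_ipi(pi_desc->ndst, pi_desc->nv);
}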

The problem is that pi_pre_block only programs the notification vector
if kvm_arch_has_assigned_device() is true. It's possible for the
following to happen:

1) vcpu V HLTs on pcpu P, kvm_arch_has_assigned_device is false,
notification vector is not programmed
2) device is assigned to VM
3) device interrupts vcpu V, sets ON bit (notification vector not programmed,
so pcpu P remains idle)
4) vcpu 0 IPIs vcpu V (in guest), but since pi descriptor ON bit is set,
kvm_vcpu_kick is skipped
5) vcpu 0 busy spins on vcpu V's response for several seconds, until
RCU watchdog NMIs all vCPUs.

To fix this, use the start_assignment kvm_x86_ops callback to program the
notification vector when the assigned device count changes from 0 to 1.
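
Patch 1/2 (not shown in this message) adds the start_assignment hook
itself; as a rough sketch of the intended wiring, assuming the hook
receives the updated device count:

/* Sketch of how the hook is driven (the actual change is in patch 1/2). */
void kvm_arch_start_assignment(struct kvm *kvm)
{
	int count = atomic_inc_return(&kvm->arch.assigned_device_count);

	if (kvm_x86_ops.start_assignment)
		kvm_x86_ops.start_assignment(kvm, count);
}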

Reported-by: Pei Zhang <pezhang@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

---

v2: add vmx_pi_start_assignment to vmx's kvm_x86_ops

Comments

Sean Christopherson May 6, 2021, 9:35 p.m. UTC | #1
On Thu, May 06, 2021, Marcelo Tosatti wrote:
> Index: kvm/arch/x86/kvm/vmx/posted_intr.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/vmx/posted_intr.c
> +++ kvm/arch/x86/kvm/vmx/posted_intr.c
> @@ -114,7 +114,7 @@ static void __pi_post_block(struct kvm_v
>  	} while (cmpxchg64(&pi_desc->control, old.control,
>  			   new.control) != old.control);
>  
> -	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
> +	if (vcpu->pre_pcpu != -1) {
>  		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
>  		list_del(&vcpu->blocked_vcpu_list);
>  		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
> @@ -135,20 +135,13 @@ static void __pi_post_block(struct kvm_v
>   *   this case, return 1, otherwise, return 0.
>   *
>   */
> -int pi_pre_block(struct kvm_vcpu *vcpu)
> +static int __pi_pre_block(struct kvm_vcpu *vcpu)
>  {
>  	unsigned int dest;
>  	struct pi_desc old, new;
>  	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
>  
> -	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
> -		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
> -		!kvm_vcpu_apicv_active(vcpu))
> -		return 0;
> -
> -	WARN_ON(irqs_disabled());
> -	local_irq_disable();
> -	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
> +	if (vcpu->pre_pcpu == -1) {
>  		vcpu->pre_pcpu = vcpu->cpu;
>  		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
>  		list_add_tail(&vcpu->blocked_vcpu_list,
> @@ -188,12 +181,33 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
>  	if (pi_test_on(pi_desc) == 1)
>  		__pi_post_block(vcpu);
>  
> +	return (vcpu->pre_pcpu == -1);

Nothing checks the return of __pi_pre_block(), this can be dropped and the
helper can be a void return.

> +}
> +
> +int pi_pre_block(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	vmx->in_blocked_section = true;
> +
> +	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
> +		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
> +		!kvm_vcpu_apicv_active(vcpu))

Opportunistically fix the indentation?

> +		return 0;
> +
> +	WARN_ON(irqs_disabled());
> +	local_irq_disable();
> +	__pi_pre_block(vcpu);
>  	local_irq_enable();
> +
>  	return (vcpu->pre_pcpu == -1);
>  }
>  
>  void pi_post_block(struct kvm_vcpu *vcpu)
>  {
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	vmx->in_blocked_section = false;
>  	if (vcpu->pre_pcpu == -1)
>  		return;
>  
> @@ -236,6 +250,52 @@ bool pi_has_pending_interrupt(struct kvm
>  		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
>  }
>  
> +static void pi_update_wakeup_vector(void *data)
> +{
> +	struct vcpu_vmx *vmx;
> +	struct kvm_vcpu *vcpu = data;
> +
> +	vmx = to_vmx(vcpu);
> +
> +	/* race with pi_post_block ? */
> +	if (vcpu->pre_pcpu != -1)

This seems wrong.  The funky code in __pi_pre_block() regarding pre_pcpu muddies
the waters, but I don't think it's safe to call __pi_pre_block() from a pCPU
other than the pCPU that is associated with the vCPU.

If the vCPU is migrated after vmx_pi_start_assignment() grabs vcpu->cpu but
before the IPI arrives (to run pi_update_wakeup_vector()), then it's possible
that a different pCPU could be running __pi_pre_block() concurrently with this
code.  If that happens, both pCPUs could see "vcpu->pre_pcpu == -1" and corrupt
the list due to a double list_add_tail.

The existing code is unnecessarily confusing, but unless I'm missing something,
it's guaranteed to call pi_pre_block() from the pCPU that is associated with the
vCPU, i.e. arguably it could/should use this_cpu_ptr().  Because the existing
code grabs vcpu->cpu with IRQs disabled and is called only from KVM_RUN,
vcpu->cpu is guaranteed to match the current pCPU since vcpu->cpu will be set to
the current pCPU when the vCPU is scheduled in.

Assuming my analysis is correct (definitely not guaranteed), I'm struggling to
come up with an elegant solution.  But, do we need an elegant solution?  E.g.
can the start_assignment() hook simply kick all vCPUs with APICv active?
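
Roughly the following (untested sketch; it sidesteps the question of
whether the kick alone actually gets a vCPU out of kvm_vcpu_block()):

void vmx_pi_start_assignment(struct kvm *kvm, int device_count)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!irq_remapping_cap(IRQ_POSTING_CAP) || device_count != 1)
		return;

	/* Kick every APICv-active vCPU so it re-runs pi_pre_block() on
	 * its own pCPU, now that a device is assigned. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (kvm_vcpu_apicv_active(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}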

> +		return;
> +
> +	if (!vmx->in_blocked_section)
> +		return;
> +
> +	__pi_pre_block(vcpu);
> +}
> +
> +void vmx_pi_start_assignment(struct kvm *kvm, int device_count)
> +{
> +	struct kvm_vcpu *vcpu;
> +	int i;
> +
> +	if (!irq_remapping_cap(IRQ_POSTING_CAP))
> +		return;
> +
> +	/* only care about first device assignment */
> +	if (device_count != 1)
> +		return;
> +
> +	/* Update wakeup vector and add vcpu to blocked_vcpu_list */
> +	kvm_for_each_vcpu(i, vcpu, kvm) {
> +		struct vcpu_vmx *vmx = to_vmx(vcpu);
> +		int pcpu;
> +
> +		if (!kvm_vcpu_apicv_active(vcpu))
> +			continue;
> +
> +		preempt_disable();

Any reason not to do "cpu = get_cpu()"?  Might make sense to do that outside of
the for-loop, too.

> +		pcpu = vcpu->cpu;
> +		if (vmx->in_blocked_section && vcpu->pre_pcpu == -1 &&
> +		    pcpu != -1 && pcpu != smp_processor_id())
> +			smp_call_function_single(pcpu, pi_update_wakeup_vector,
> +						 vcpu, 1);
> +		preempt_enable();
> +	}
> +}
Marcelo Tosatti May 7, 2021, 12:11 p.m. UTC | #2
Hi Sean,

On Thu, May 06, 2021 at 09:35:46PM +0000, Sean Christopherson wrote:
> On Thu, May 06, 2021, Marcelo Tosatti wrote:
> > Index: kvm/arch/x86/kvm/vmx/posted_intr.c
> > ===================================================================
> > --- kvm.orig/arch/x86/kvm/vmx/posted_intr.c
> > +++ kvm/arch/x86/kvm/vmx/posted_intr.c
> > @@ -114,7 +114,7 @@ static void __pi_post_block(struct kvm_v
> >  	} while (cmpxchg64(&pi_desc->control, old.control,
> >  			   new.control) != old.control);
> >  
> > -	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
> > +	if (vcpu->pre_pcpu != -1) {
> >  		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
> >  		list_del(&vcpu->blocked_vcpu_list);
> >  		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
> > @@ -135,20 +135,13 @@ static void __pi_post_block(struct kvm_v
> >   *   this case, return 1, otherwise, return 0.
> >   *
> >   */
> > -int pi_pre_block(struct kvm_vcpu *vcpu)
> > +static int __pi_pre_block(struct kvm_vcpu *vcpu)
> >  {
> >  	unsigned int dest;
> >  	struct pi_desc old, new;
> >  	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
> >  
> > -	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
> > -		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
> > -		!kvm_vcpu_apicv_active(vcpu))
> > -		return 0;
> > -
> > -	WARN_ON(irqs_disabled());
> > -	local_irq_disable();
> > -	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
> > +	if (vcpu->pre_pcpu == -1) {
> >  		vcpu->pre_pcpu = vcpu->cpu;
> >  		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
> >  		list_add_tail(&vcpu->blocked_vcpu_list,
> > @@ -188,12 +181,33 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
> >  	if (pi_test_on(pi_desc) == 1)
> >  		__pi_post_block(vcpu);
> >  
> > +	return (vcpu->pre_pcpu == -1);
> 
> Nothing checks the return of __pi_pre_block(), this can be dropped and the
> helper can be a void return.

Done.

> > +}
> > +
> > +int pi_pre_block(struct kvm_vcpu *vcpu)
> > +{
> > +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> > +
> > +	vmx->in_blocked_section = true;
> > +
> > +	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
> > +		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
> > +		!kvm_vcpu_apicv_active(vcpu))
> 
> Opportunistically fix the indentation?

Done.

> > +		return 0;
> > +
> > +	WARN_ON(irqs_disabled());
> > +	local_irq_disable();
> > +	__pi_pre_block(vcpu);
> >  	local_irq_enable();
> > +
> >  	return (vcpu->pre_pcpu == -1);
> >  }
> >  
> >  void pi_post_block(struct kvm_vcpu *vcpu)
> >  {
> > +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> > +
> > +	vmx->in_blocked_section = false;
> >  	if (vcpu->pre_pcpu == -1)
> >  		return;
> >  
> > @@ -236,6 +250,52 @@ bool pi_has_pending_interrupt(struct kvm
> >  		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
> >  }
> >  
> > +static void pi_update_wakeup_vector(void *data)
> > +{
> > +	struct vcpu_vmx *vmx;
> > +	struct kvm_vcpu *vcpu = data;
> > +
> > +	vmx = to_vmx(vcpu);
> > +
> > +	/* race with pi_post_block ? */
> > +	if (vcpu->pre_pcpu != -1)
> 
> This seems wrong.  The funky code in __pi_pre_block() regarding pre_pcpu muddies
> the waters, but I don't think it's safe to call __pi_pre_block() from a pCPU
> other than the pCPU that is associated with the vCPU.

From Intel's manual:

"29.6 POSTED-INTERRUPT PROCESSING

...

Use of the posted-interrupt descriptor differs from that of other
data structures that are referenced by pointers in a VMCS. There is a
general requirement that software ensure that each such data structure
is modified only when no logical processor with a current VMCS that
references it is in VMX non-root operation. That requirement does not
apply to the posted-interrupt descriptor. There is a requirement,
however, that such modifications be done using locked read-modify-write
instructions."

> If the vCPU is migrated after vmx_pi_start_assignment() grabs vcpu->cpu but
> before the IPI arrives (to run pi_update_wakeup_vector()), then it's possible
> that a different pCPU could be running __pi_pre_block() concurrently with this
> code.  If that happens, both pCPUs could see "vcpu->pre_pcpu == -1" and corrupt
> the list due to a double list_add_tail.

Good point.

> The existing code is unnecessarily confusing, but unless I'm missing something,
> it's guaranteed to call pi_pre_block() from the pCPU that is associated with the
> vCPU, i.e. arguably it could/should use this_cpu_ptr().

Well, the problem is that the kicked vcpu might not exit
kvm_vcpu_block(). However, that can be fixed.


>  Because the existing
> code grabs vcpu->cpu with IRQs disabled and is called only from KVM_RUN,
> vcpu->cpu is guaranteed to match the current pCPU since vcpu->cpu will be set to
> the current pCPU when the vCPU is scheduled in.
> 
> Assuming my analysis is correct (definitely not guaranteed), I'm struggling to
> come up with an elegant solution.  But, do we need an elegant solution?  E.g.
> can the start_assignment() hook simply kick all vCPUs with APICv active?
> 
> > +		return;
> > +
> > +	if (!vmx->in_blocked_section)
> > +		return;
> > +
> > +	__pi_pre_block(vcpu);
> > +}
> > +
> > +void vmx_pi_start_assignment(struct kvm *kvm, int device_count)
> > +{
> > +	struct kvm_vcpu *vcpu;
> > +	int i;
> > +
> > +	if (!irq_remapping_cap(IRQ_POSTING_CAP))
> > +		return;
> > +
> > +	/* only care about first device assignment */
> > +	if (device_count != 1)
> > +		return;
> > +
> > +	/* Update wakeup vector and add vcpu to blocked_vcpu_list */
> > +	kvm_for_each_vcpu(i, vcpu, kvm) {
> > +		struct vcpu_vmx *vmx = to_vmx(vcpu);
> > +		int pcpu;
> > +
> > +		if (!kvm_vcpu_apicv_active(vcpu))
> > +			continue;
> > +
> > +		preempt_disable();
> 
> Any reason not to do "cpu = get_cpu()"?  Might make sense to do that outside of
> the for-loop, too.

kvm_vcpu_kick() seems cleaner; it just needs another arch hook to allow
kvm_vcpu_block() to return.
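
Something like this (untested sketch; KVM_REQ_UNBLOCK is a hypothetical
new request whose handling would make kvm_vcpu_block() return):

void vmx_pi_start_assignment(struct kvm *kvm, int device_count)
{
	if (!irq_remapping_cap(IRQ_POSTING_CAP) || device_count != 1)
		return;

	/*
	 * Raise a request on (and kick) every vCPU so kvm_vcpu_block()
	 * returns and the pre-block path runs again, this time with
	 * kvm_arch_has_assigned_device() == true, which programs
	 * POSTED_INTR_WAKEUP_VECTOR into the PI descriptor.
	 */
	kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
}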

Thanks for the review! Will resend after testing.

Patch

Index: kvm/arch/x86/kvm/vmx/posted_intr.c
===================================================================
--- kvm.orig/arch/x86/kvm/vmx/posted_intr.c
+++ kvm/arch/x86/kvm/vmx/posted_intr.c
@@ -114,7 +114,7 @@  static void __pi_post_block(struct kvm_v
 	} while (cmpxchg64(&pi_desc->control, old.control,
 			   new.control) != old.control);
 
-	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+	if (vcpu->pre_pcpu != -1) {
 		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
 		list_del(&vcpu->blocked_vcpu_list);
 		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
@@ -135,20 +135,13 @@  static void __pi_post_block(struct kvm_v
  *   this case, return 1, otherwise, return 0.
  *
  */
-int pi_pre_block(struct kvm_vcpu *vcpu)
+static int __pi_pre_block(struct kvm_vcpu *vcpu)
 {
 	unsigned int dest;
 	struct pi_desc old, new;
 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
-		!kvm_vcpu_apicv_active(vcpu))
-		return 0;
-
-	WARN_ON(irqs_disabled());
-	local_irq_disable();
-	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+	if (vcpu->pre_pcpu == -1) {
 		vcpu->pre_pcpu = vcpu->cpu;
 		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
 		list_add_tail(&vcpu->blocked_vcpu_list,
@@ -188,12 +181,33 @@  int pi_pre_block(struct kvm_vcpu *vcpu)
 	if (pi_test_on(pi_desc) == 1)
 		__pi_post_block(vcpu);
 
+	return (vcpu->pre_pcpu == -1);
+}
+
+int pi_pre_block(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	vmx->in_blocked_section = true;
+
+	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
+		!kvm_vcpu_apicv_active(vcpu))
+		return 0;
+
+	WARN_ON(irqs_disabled());
+	local_irq_disable();
+	__pi_pre_block(vcpu);
 	local_irq_enable();
+
 	return (vcpu->pre_pcpu == -1);
 }
 
 void pi_post_block(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	vmx->in_blocked_section = false;
 	if (vcpu->pre_pcpu == -1)
 		return;
 
@@ -236,6 +250,52 @@  bool pi_has_pending_interrupt(struct kvm
 		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
 }
 
+static void pi_update_wakeup_vector(void *data)
+{
+	struct vcpu_vmx *vmx;
+	struct kvm_vcpu *vcpu = data;
+
+	vmx = to_vmx(vcpu);
+
+	/* race with pi_post_block ? */
+	if (vcpu->pre_pcpu != -1)
+		return;
+
+	if (!vmx->in_blocked_section)
+		return;
+
+	__pi_pre_block(vcpu);
+}
+
+void vmx_pi_start_assignment(struct kvm *kvm, int device_count)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	if (!irq_remapping_cap(IRQ_POSTING_CAP))
+		return;
+
+	/* only care about first device assignment */
+	if (device_count != 1)
+		return;
+
+	/* Update wakeup vector and add vcpu to blocked_vcpu_list */
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct vcpu_vmx *vmx = to_vmx(vcpu);
+		int pcpu;
+
+		if (!kvm_vcpu_apicv_active(vcpu))
+			continue;
+
+		preempt_disable();
+		pcpu = vcpu->cpu;
+		if (vmx->in_blocked_section && vcpu->pre_pcpu == -1 &&
+		    pcpu != -1 && pcpu != smp_processor_id())
+			smp_call_function_single(pcpu, pi_update_wakeup_vector,
+						 vcpu, 1);
+		preempt_enable();
+	}
+}
 
 /*
  * pi_update_irte - set IRTE for Posted-Interrupts
Index: kvm/arch/x86/kvm/vmx/posted_intr.h
===================================================================
--- kvm.orig/arch/x86/kvm/vmx/posted_intr.h
+++ kvm/arch/x86/kvm/vmx/posted_intr.h
@@ -95,5 +95,6 @@  void __init pi_init_cpu(int cpu);
 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
 		   bool set);
+void vmx_pi_start_assignment(struct kvm *kvm, int device_count);
 
 #endif /* __KVM_X86_VMX_POSTED_INTR_H */
Index: kvm/arch/x86/kvm/vmx/vmx.h
===================================================================
--- kvm.orig/arch/x86/kvm/vmx/vmx.h
+++ kvm/arch/x86/kvm/vmx/vmx.h
@@ -336,6 +336,9 @@  struct vcpu_vmx {
 		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 	} shadow_msr_intercept;
+
+	/* true if vcpu is between pre_block and post_block */
+	bool in_blocked_section;
 };
 
 enum ept_pointers_status {
Index: kvm/arch/x86/kvm/vmx/vmx.c
===================================================================
--- kvm.orig/arch/x86/kvm/vmx/vmx.c
+++ kvm/arch/x86/kvm/vmx/vmx.c
@@ -7732,7 +7732,7 @@  static struct kvm_x86_ops vmx_x86_ops __
 	.nested_ops = &vmx_nested_ops,
 
 	.update_pi_irte = pi_update_irte,
-	.start_assignment = NULL,
+	.start_assignment = vmx_pi_start_assignment,
 
 #ifdef CONFIG_X86_64
 	.set_hv_timer = vmx_set_hv_timer,