
[RESEND,07/30] KVM: x86: nSVM: deal with L1 hypervisor that intercepts interrupts but lets L2 control them

Message ID 20220207155447.840194-8-mlevitsk@redhat.com (mailing list archive)
State New, archived
Series My patch queue

Commit Message

Maxim Levitsky Feb. 7, 2022, 3:54 p.m. UTC
Fix a corner case in which the L1 hypervisor intercepts
interrupts (INTERCEPT_INTR) and either doesn't set
virtual interrupt masking (V_INTR_MASKING) or enters the
nested guest with its own EFLAGS.IF cleared prior to the entry.

In this case, even though L1 intercepts the interrupts,
KVM still needs to open an interrupt window and wait
before injecting the INTR vmexit.

Currently, KVM instead enters an endless loop of
'req_immediate_exit' requests.

Exactly the same issue also happens for SMIs and NMIs;
fix those cases as well.

Note that on VMX this case is impossible, as there is only the
'vmexit on external interrupts' execution control, which is either set,
in which case both the host's and the guest's EFLAGS.IF
are ignored, or not set, in which case no vmexits are delivered.


Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/svm/svm.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
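
The reason the new "return 0 when blocked" path matters is in how the generic event-injection code consumes the tri-state result of svm_{nmi,interrupt,smi}_allowed(). The snippet below is a simplified illustration of that consumer, not the actual arch/x86/kvm/x86.c code; request_immediate_exit() and deliver_intr() are placeholder names, not real KVM functions.

/*
 * Simplified illustration (not the actual arch/x86/kvm/x86.c code) of how
 * the tri-state return value of svm_interrupt_allowed() is consumed.
 * request_immediate_exit() and deliver_intr() are placeholders.
 */
static void sketch_handle_pending_intr(struct kvm_vcpu *vcpu, bool for_injection)
{
        int r = svm_interrupt_allowed(vcpu, for_injection);

        if (r == -EBUSY) {
                /*
                 * The event will become deliverable right after the next VM
                 * entry (e.g. as an INTR vmexit to L1), so ask for an
                 * immediate exit and retry then.
                 */
                request_immediate_exit(vcpu);
        } else if (r == 0) {
                /*
                 * Delivery is blocked (e.g. L2 runs with EFLAGS.IF=0 and L1
                 * did not set V_INTR_MASKING): open an interrupt window and
                 * wait.  Before the fix, a blocked interrupt that was also
                 * supposed to cause a nested vmexit hit the -EBUSY path
                 * instead, producing an endless stream of immediate exits.
                 */
                svm_enable_irq_window(vcpu);
        } else {
                /* r == 1: the interrupt can be delivered right now. */
                deliver_intr(vcpu);
        }
}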

Comments

Paolo Bonzini Feb. 8, 2022, 11:33 a.m. UTC | #1
On 2/7/22 16:54, Maxim Levitsky wrote:
> [...]

Can you prepare a testcase for at least the interrupt case?

Thanks,

Paolo
Maxim Levitsky Feb. 8, 2022, 11:55 a.m. UTC | #2
On Tue, 2022-02-08 at 12:33 +0100, Paolo Bonzini wrote:
> On 2/7/22 16:54, Maxim Levitsky wrote:
> > [...]
> 
> Can you prepare a testcase for at least the interrupt case?


Yep, I already wrote kvm-unit-tests for all the cases, and I will send them very soon.

Best regards,
	Maxim Levitsky
> 
> Thanks,
> 
> Paolo
>
Maxim Levitsky Feb. 8, 2022, 12:24 p.m. UTC | #3
On Tue, 2022-02-08 at 13:55 +0200, Maxim Levitsky wrote:
> On Tue, 2022-02-08 at 12:33 +0100, Paolo Bonzini wrote:
> > On 2/7/22 16:54, Maxim Levitsky wrote:
> > > [...]
> > 
> > Can you prepare a testcase for at least the interrupt case?
> 
> Yep, I already wrote kvm-unit-tests for all the cases, and I will send them very soon.

Done.

I also included tests for LBR virtualization, which I think I already posted, but I am not sure.

Best regards,
	Maxim Levitsky
> 
> Best regards,
> 	Maxim Levitsky
> > Thanks,
> > 
> > Paolo
> >
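
For readers who want to reproduce the problem, here is a rough, hypothetical sketch of what the interrupt-case test could look like under the kvm-unit-tests SVM harness (the global vmcb, test_set_guest(), svm_vmrun(), handle_irq() and report() are assumed from that harness). This is not the test series referenced above, and helper names and details may differ from the posted tests.

/*
 * Hypothetical sketch of an interrupt-case test for the kvm-unit-tests SVM
 * harness; not the posted test series.
 */
#define SELF_IPI_VECTOR 0xf1

static void self_ipi_isr(isr_regs_t *regs)
{
        eoi();          /* just acknowledge the IPI, whoever ends up taking it */
}

static void l2_guest_if_clear(struct svm_test *test)
{
        /* Make an interrupt pending while L2 still runs with EFLAGS.IF=0. */
        apic_icr_write(APIC_DEST_SELF | APIC_DM_FIXED | SELF_IPI_VECTOR, 0);

        /*
         * KVM has to open an interrupt window and wait here; the INTR vmexit
         * to L1 may only be delivered once interrupts are unblocked.  Before
         * the fix, this scenario livelocked on req_immediate_exit.
         */
        asm volatile("sti; nop; cli");
}

static void test_intr_intercept_l2_controls_if(void)
{
        handle_irq(SELF_IPI_VECTOR, self_ipi_isr);
        test_set_guest(l2_guest_if_clear);

        /* L1 intercepts INTR but leaves interrupt masking to L2 ... */
        vmcb->control.intercept |= 1ULL << INTERCEPT_INTR;
        vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

        /* ... and enters L2 with EFLAGS.IF clear. */
        vmcb->save.rflags &= ~X86_EFLAGS_IF;

        report(svm_vmrun() == SVM_EXIT_INTR,
               "INTR vmexit delivered only after L2 executed STI");

        /* Let L1 take (and EOI) the still-pending self-IPI. */
        asm volatile("sti; nop; cli");
}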

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9a4e299ed5673..22e614008cf59 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3372,11 +3372,13 @@  static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	if (svm->nested.nested_run_pending)
 		return -EBUSY;
 
+	if (svm_nmi_blocked(vcpu))
+		return 0;
+
 	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
 		return -EBUSY;
-
-	return !svm_nmi_blocked(vcpu);
+	return 1;
 }
 
 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -3428,9 +3430,13 @@  bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+
 	if (svm->nested.nested_run_pending)
 		return -EBUSY;
 
+	if (svm_interrupt_blocked(vcpu))
+		return 0;
+
 	/*
 	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
 	 * e.g. if the IRQ arrived asynchronously after checking nested events.
@@ -3438,7 +3444,7 @@  static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
 		return -EBUSY;
 
-	return !svm_interrupt_blocked(vcpu);
+	return 1;
 }
 
 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -4169,11 +4175,14 @@  static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 	if (svm->nested.nested_run_pending)
 		return -EBUSY;
 
+	if (svm_smi_blocked(vcpu))
+		return 0;
+
 	/* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
 	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
 		return -EBUSY;
 
-	return !svm_smi_blocked(vcpu);
+	return 1;
 }
 
 static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
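
As a companion to the two new early returns, the sketch below summarizes, in simplified form, the blocking condition that svm_interrupt_blocked() evaluates while L2 is running. It is not verbatim kernel code; nested_vintr_masking() and l1_saved_eflags_if() are hypothetical helpers standing in for the real field accesses.

/*
 * Simplified sketch, not verbatim kernel code: the condition that the new
 * "if (svm_interrupt_blocked(vcpu)) return 0;" early return consults while
 * L2 is running.  nested_vintr_masking() and l1_saved_eflags_if() are
 * hypothetical helpers.
 */
static bool sketch_l2_interrupt_blocked(struct vcpu_svm *svm)
{
        /* With GIF clear nothing is delivered, intercepted or not. */
        if (!gif_set(svm))
                return true;

        /* L1 set V_INTR_MASKING: its own saved EFLAGS.IF decides. */
        if (nested_vintr_masking(svm))
                return !l1_saved_eflags_if(svm);

        /*
         * L1 left interrupt masking to L2: L2's EFLAGS.IF decides, even
         * though the eventual delivery is an INTR vmexit to L1 rather than
         * an injection into L2.  When this returns true, KVM now opens an
         * interrupt window instead of requesting an immediate exit forever.
         */
        return !(kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF);
}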