diff mbox series

[05/28] KVM: nSVM: correctly inject INIT vmexits

Message ID 20200526172308.111575-6-pbonzini@redhat.com (mailing list archive)
State New, archived
Headers show
Series KVM: nSVM: event fixes and migration support | expand

Commit Message

Paolo Bonzini May 26, 2020, 5:22 p.m. UTC
The usual drill at this point, except there is no code to remove because this
case was not handled at all.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

Comments

Krish Sadhukhan May 29, 2020, 6:46 a.m. UTC | #1
On 5/26/20 10:22 AM, Paolo Bonzini wrote:
> The usual drill at this point, except there is no code to remove because this
> case was not handled at all.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>   arch/x86/kvm/svm/nested.c | 27 +++++++++++++++++++++++++++
>   1 file changed, 27 insertions(+)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index bbf991cfe24b..166b88fc9509 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -25,6 +25,7 @@
>   #include "trace.h"
>   #include "mmu.h"
>   #include "x86.h"
> +#include "lapic.h"
>   #include "svm.h"
>   
>   static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
> @@ -788,11 +789,37 @@ static void nested_svm_intr(struct vcpu_svm *svm)
>   	nested_svm_vmexit(svm);
>   }
>   
> +static inline bool nested_exit_on_init(struct vcpu_svm *svm)
> +{
> +	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
> +}
> +
> +static void nested_svm_init(struct vcpu_svm *svm)

Should this be named nested_svm_inject_init_vmexit in accordance with 
nested_svm_inject_exception_vmexit that you did in patch #3?

> +{
> +	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
> +	svm->vmcb->control.exit_info_1 = 0;
> +	svm->vmcb->control.exit_info_2 = 0;
> +
> +	nested_svm_vmexit(svm);
> +}
> +
> +
>   static int svm_check_nested_events(struct kvm_vcpu *vcpu)
>   {
>   	struct vcpu_svm *svm = to_svm(vcpu);
>   	bool block_nested_events =
>   		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
> +	struct kvm_lapic *apic = vcpu->arch.apic;
> +
> +	if (lapic_in_kernel(vcpu) &&
> +	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
> +		if (block_nested_events)
> +			return -EBUSY;
> +		if (!nested_exit_on_init(svm))
> +			return 0;
> +		nested_svm_init(svm);
> +		return 0;
> +	}
>   
>   	if (vcpu->arch.exception.pending) {
>   		if (block_nested_events)
Paolo Bonzini May 29, 2020, 8:47 a.m. UTC | #2
On 29/05/20 08:46, Krish Sadhukhan wrote:
>>
>> +static void nested_svm_init(struct vcpu_svm *svm)
> 
> Should this be named nested_svm_inject_init_vmexit in accordance with
> nested_svm_inject_exception_vmexit that you did in patch #3?

There's also nested_svm_intr and nested_svm_nmi.  I'll rename all of
them, but it will be a follow-up.

Paolo
diff mbox series

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index bbf991cfe24b..166b88fc9509 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@ 
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "lapic.h"
 #include "svm.h"
 
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -788,11 +789,37 @@  static void nested_svm_intr(struct vcpu_svm *svm)
 	nested_svm_vmexit(svm);
 }
 
+static inline bool nested_exit_on_init(struct vcpu_svm *svm)
+{
+	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
+}
+
+static void nested_svm_init(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
+	svm->vmcb->control.exit_info_1 = 0;
+	svm->vmcb->control.exit_info_2 = 0;
+
+	nested_svm_vmexit(svm);
+}
+
+
 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	bool block_nested_events =
 		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (lapic_in_kernel(vcpu) &&
+	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
+		if (block_nested_events)
+			return -EBUSY;
+		if (!nested_exit_on_init(svm))
+			return 0;
+		nested_svm_init(svm);
+		return 0;
+	}
 
 	if (vcpu->arch.exception.pending) {
 		if (block_nested_events)