From patchwork Thu Apr 22 10:33:13 2010
X-Patchwork-Submitter: Joerg Roedel
X-Patchwork-Id: 94074
From: Joerg Roedel
To: Avi Kivity, Marcelo Tosatti
CC: kvm@vger.kernel.org, linux-kernel@vger.kernel.org, Joerg Roedel
Subject: [PATCH 7/8] KVM: x86: Allow marking an exception as reinjected
Date: Thu, 22 Apr 2010 12:33:13 +0200
Message-ID: <1271932394-13968-8-git-send-email-joerg.roedel@amd.com>
X-Mailer: git-send-email 1.7.0.4
In-Reply-To: <1271932394-13968-1-git-send-email-joerg.roedel@amd.com>
References: <1271932394-13968-1-git-send-email-joerg.roedel@amd.com>

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 357573a..3f0007b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -312,6 +312,7 @@ struct kvm_vcpu_arch {
 	struct kvm_queued_exception {
 		bool pending;
 		bool has_error_code;
+		bool reinject;
 		u8 nr;
 		u32 error_code;
 	} exception;
@@ -514,7 +515,8 @@ struct kvm_x86_ops {
 	void (*set_irq)(struct kvm_vcpu *vcpu);
 	void (*set_nmi)(struct kvm_vcpu *vcpu);
 	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code);
+				bool has_error_code, u32 error_code,
+				bool reinject);
 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
@@ -617,6 +619,8 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 65fc114..30e49fe 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -338,7 +338,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code)
+				bool has_error_code, u32 error_code,
+				bool reinject)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -346,7 +347,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 	 * If we are within a nested VM we'd better #VMEXIT and let the guest
 	 * handle the exception
 	 */
-	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
+	if (!reinject &&
+	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
 	if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
@@ -2918,8 +2920,6 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 		svm->vcpu.arch.nmi_injected = true;
 		break;
 	case SVM_EXITINTINFO_TYPE_EXEPT:
-		if (is_nested(svm))
-			break;
 		/*
 		 * In case of software exceptions, do not reinject the vector,
 		 * but re-execute the instruction instead. Rewind RIP first
@@ -2935,10 +2935,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 		}
 		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
 			u32 err = svm->vmcb->control.exit_int_info_err;
-			kvm_queue_exception_e(&svm->vcpu, vector, err);
+			kvm_requeue_exception_e(&svm->vcpu, vector, err);
 
 		} else
-			kvm_queue_exception(&svm->vcpu, vector);
+			kvm_requeue_exception(&svm->vcpu, vector);
 		break;
 	case SVM_EXITINTINFO_TYPE_INTR:
 		kvm_queue_interrupt(&svm->vcpu, vector, false);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index dc023bc..285fe1a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -919,7 +919,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 }
 
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-				bool has_error_code, u32 error_code)
+				bool has_error_code, u32 error_code,
+				bool reinject)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info = nr | INTR_INFO_VALID_MASK;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c2c3c29..9c183cd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -265,7 +265,8 @@ static int exception_class(int vector)
 }
 
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
-		unsigned nr, bool has_error, u32 error_code)
+		unsigned nr, bool has_error, u32 error_code,
+		bool reinject)
 {
 	u32 prev_nr;
 	int class1, class2;
@@ -276,6 +277,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		vcpu->arch.exception.has_error_code = has_error;
 		vcpu->arch.exception.nr = nr;
 		vcpu->arch.exception.error_code = error_code;
+		vcpu->arch.exception.reinject = reinject;
 		return;
 	}
 
@@ -304,10 +306,16 @@
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
-	kvm_multiple_exception(vcpu, nr, false, 0);
+	kvm_multiple_exception(vcpu, nr, false, 0, false);
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
+{
+	kvm_multiple_exception(vcpu, nr, false, 0, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			   u32 error_code)
 {
@@ -324,10 +332,16 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
 
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
-	kvm_multiple_exception(vcpu, nr, true, error_code);
+	kvm_multiple_exception(vcpu, nr, true, error_code, false);
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
 
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
+{
+	kvm_multiple_exception(vcpu, nr, true, error_code, true);
+}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
+
 /*
  * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
  * a #GP and return false.
@@ -4404,7 +4418,8 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 					vcpu->arch.exception.error_code);
 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
 					     vcpu->arch.exception.has_error_code,
-					     vcpu->arch.exception.error_code);
+					     vcpu->arch.exception.error_code,
+					     vcpu->arch.exception.reinject);
 		return;
 	}
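
For readers skimming the patch, the effect of the new reinject flag can be modeled with a minimal userspace sketch. This is illustrative only and assumes made-up names (fake_vcpu, fake_queue_exception, nested_should_intercept); it is not kernel code. The point it demonstrates: a freshly queued exception may still be redirected to the nested hypervisor as a #VMEXIT, while a reinjected exception (one that was already accepted once and then interrupted by a vmexit) must be delivered straight back into the guest.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model only -- not the kernel's types or APIs. */
struct fake_exception {
	bool pending;
	bool has_error_code;
	bool reinject;            /* mirrors the field added by this patch */
	unsigned nr;
	unsigned error_code;
};

struct fake_vcpu {
	struct fake_exception exception;
	bool nested_guest_running;
};

/* Stand-in for nested_svm_check_exception(): returns true when the nested
 * hypervisor intercepts this vector and a #VMEXIT should be emulated. */
static bool nested_should_intercept(struct fake_vcpu *vcpu, unsigned nr)
{
	return vcpu->nested_guest_running && nr == 14;   /* e.g. #PF intercepted */
}

static void fake_queue_exception(struct fake_vcpu *vcpu, unsigned nr,
				 bool has_error_code, unsigned error_code,
				 bool reinject)
{
	/* A reinjected exception was already accepted once, so it must not be
	 * re-routed to the nested hypervisor -- the key point of the patch. */
	if (!reinject && nested_should_intercept(vcpu, nr)) {
		printf("vector %u: emulate #VMEXIT to nested hypervisor\n", nr);
		return;
	}

	vcpu->exception.pending = true;
	vcpu->exception.has_error_code = has_error_code;
	vcpu->exception.nr = nr;
	vcpu->exception.error_code = error_code;
	vcpu->exception.reinject = reinject;
	printf("vector %u: inject into guest (reinject=%d)\n", nr, reinject);
}

int main(void)
{
	struct fake_vcpu vcpu = { .nested_guest_running = true };

	fake_queue_exception(&vcpu, 14, true, 0x2, false);  /* fresh #PF -> #VMEXIT   */
	fake_queue_exception(&vcpu, 14, true, 0x2, true);   /* requeued #PF -> guest  */
	return 0;
}

Built with any C99 compiler, the first call takes the intercept path and the second injects directly, which is the distinction kvm_queue_exception*() versus kvm_requeue_exception*() encodes in the patch above.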