From patchwork Wed Nov 19 15:43:11 2014
X-Patchwork-Submitter: Nadav Amit
X-Patchwork-Id: 5338291
From: Nadav Amit
To: pbonzini@redhat.com
Cc: kvm@vger.kernel.org, Nadav Amit
Subject: [PATCH 4/6] KVM: x86: Perform limit checks when assigning EIP
Date: Wed, 19 Nov 2014 17:43:11 +0200
Message-Id: <1416411793-22244-5-git-send-email-namit@cs.technion.ac.il>
In-Reply-To: <1416411793-22244-1-git-send-email-namit@cs.technion.ac.il>
References: <1416411793-22244-1-git-send-email-namit@cs.technion.ac.il>
X-Mailing-List: kvm@vger.kernel.org

If a branch (e.g., jmp, ret) causes a limit violation because the target IP
exceeds the CS limit, the #GP exception occurs before the branch takes
effect.  In other words, the RIP pushed on the stack should be that of the
branch and not that of the target.  To get this behavior, we can call
__linearize with the new EIP, which also saves us the code that performs
the canonical address checks.  In the case of assigning an EIP >= 2^32
(when switching cs.l), we are also safe, as __linearize will check that the
new EIP does not exceed the limit and trigger #GP(0) otherwise.
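[Editorial illustration, not part of the patch: the point of the change is the
ordering "validate target, then commit".  A minimal sketch of that ordering,
where linearize_code() is a hypothetical stand-in for the __linearize call
used by the patch:]

	/*
	 * Sketch: validate the branch target before committing it.
	 * If validation fails, ctxt->_eip is left unchanged and still
	 * points past the branch instruction itself, so the #GP is
	 * delivered with the branch's RIP, not the target's.
	 * linearize_code() is hypothetical; the patch uses __linearize.
	 */
	static int commit_branch(struct x86_emulate_ctxt *ctxt, ulong target)
	{
		ulong linear;
		int rc = linearize_code(ctxt, target, &linear);

		if (rc != X86EMUL_CONTINUE)
			return rc;	/* _eip untouched: fault blames the branch */
		ctxt->_eip = target;	/* only now does the branch take effect */
		return X86EMUL_CONTINUE;
	}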
Signed-off-by: Nadav Amit
---
 arch/x86/kvm/emulate.c | 95 ++++++++++++++++++++++++++++----------------------
 1 file changed, 54 insertions(+), 41 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d9461e4..4d083fb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -575,40 +575,6 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }
 
-static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-				 int cs_l)
-{
-	switch (ctxt->op_bytes) {
-	case 2:
-		ctxt->_eip = (u16)dst;
-		break;
-	case 4:
-		ctxt->_eip = (u32)dst;
-		break;
-#ifdef CONFIG_X86_64
-	case 8:
-		if ((cs_l && is_noncanonical_address(dst)) ||
-		    (!cs_l && (dst >> 32) != 0))
-			return emulate_gp(ctxt, 0);
-		ctxt->_eip = dst;
-		break;
-#endif
-	default:
-		WARN(1, "unsupported eip assignment size\n");
-	}
-	return X86EMUL_CONTINUE;
-}
-
-static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
-{
-	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
-}
-
-static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
-	return assign_eip_near(ctxt, ctxt->_eip + rel);
-}
-
 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 {
 	u16 selector;
@@ -656,7 +622,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
 				       bool write, bool fetch,
-				       ulong *linear)
+				       enum x86emul_mode mode, ulong *linear)
 {
 	struct desc_struct desc;
 	bool usable;
@@ -666,7 +632,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	*max_size = 0;
-	switch (ctxt->mode) {
+	switch (mode) {
 	case X86EMUL_MODE_PROT64:
 		if (is_noncanonical_address(la))
 			return emulate_gp(ctxt, 0);
@@ -725,9 +691,55 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     ulong *linear)
 {
 	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+	return __linearize(ctxt, addr, &max_size, size, write, false,
+			   ctxt->mode, linear);
 }
 
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+			     enum x86emul_mode mode)
+{
+	ulong linear;
+	int rc;
+	unsigned max_size;
+	struct segmented_address addr = { .seg = VCPU_SREG_CS,
+					  .ea = dst };
+
+	if (ctxt->op_bytes != sizeof(unsigned long))
+		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+	if (rc == X86EMUL_CONTINUE)
+		ctxt->_eip = addr.ea;
+	return rc;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+	return assign_eip(ctxt, dst, ctxt->mode);
+}
+
+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+			  const struct desc_struct *cs_desc)
+{
+	enum x86emul_mode mode = ctxt->mode;
+
+#ifdef CONFIG_X86_64
+	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
+		u64 efer = 0;
+
+		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+		if (efer & EFER_LMA)
+			mode = X86EMUL_MODE_PROT64;
+	}
+#endif
+	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+	return assign_eip(ctxt, dst, mode);
+}
+
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+	return assign_eip_near(ctxt, ctxt->_eip + rel);
+}
+
 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 			      struct segmented_address addr,
@@ -766,7 +778,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * boundary check itself.  Instead, we use max_size to check
 	 * against op_size.
 	 */
-	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
+			 &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
@@ -2032,7 +2045,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
 	if (rc != X86EMUL_CONTINUE) {
 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		/* assigning eip failed; restore the old cs */
@@ -2120,7 +2133,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = assign_eip_far(ctxt, eip, new_desc.l);
+	rc = assign_eip_far(ctxt, eip, &new_desc);
 	if (rc != X86EMUL_CONTINUE) {
 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
@@ -3010,7 +3023,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
-	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		goto fail;
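
[Editorial note on the masking in the new assign_eip(): for op_bytes of 2 or
4, the target is truncated to 16 or 32 bits before the limit check, while the
op_bytes == sizeof(unsigned long) case skips the mask entirely, which also
avoids the undefined 1UL << 64 shift.  A standalone sketch of that arithmetic
(illustrative values, not from the patch):]

	#include <stdio.h>

	/*
	 * (1UL << (op_bytes << 3)) - 1 builds a mask of op_bytes * 8
	 * one-bits, so a 16-bit or 32-bit branch target wraps before
	 * __linearize checks it against the CS limit.
	 */
	int main(void)
	{
		unsigned long dst = 0x12345678;
		int op_bytes;

		for (op_bytes = 2; op_bytes <= 4; op_bytes += 2) {
			unsigned long ea = dst & ((1UL << (op_bytes << 3)) - 1);
			printf("op_bytes=%d -> ea=%#lx\n", op_bytes, ea);
		}
		return 0;	/* prints 0x5678, then 0x12345678 */
	}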