From patchwork Fri Aug 4 05:44:18 2017
X-Patchwork-Submitter: Richard Henderson
X-Patchwork-Id: 9880607
From: Richard Henderson <rth@twiddle.net>
To: qemu-devel@nongnu.org
Date: Thu, 3 Aug 2017 22:44:18 -0700
Message-Id: <20170804054426.10590-16-rth@twiddle.net>
X-Mailer: git-send-email 2.13.3
In-Reply-To: <20170804054426.10590-1-rth@twiddle.net>
References: <20170804054426.10590-1-rth@twiddle.net>
Subject: [Qemu-devel] [PATCH for-2.11 15/23] tcg/arm: Improve tlb load for armv7

Use UBFX to avoid the limitation on CPU_TLB_BITS.  Since we're dropping
the initial shift, we need to replace the page masking; we can use
MOVW+BIC to do this without shifting.  The result is the same size as
the armv6 path, with one fewer conditional instruction.

Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/arm/tcg-target.inc.c | 72 ++++++++++++++++++++++++++++++++++--------------
 1 file changed, 52 insertions(+), 20 deletions(-)

diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 81ea900852..66c369c239 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1173,18 +1173,33 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
     unsigned s_bits = opc & MO_SIZE;
     unsigned a_bits = get_alignment_bits(opc);
 
-    /* Should generate something like the following:
-     *   shr    tmp, addrlo, #TARGET_PAGE_BITS                  (1)
+    /* V7 generates the following:
+     *   ubfx   r0, addrlo, #TARGET_PAGE_BITS, #CPU_TLB_BITS
      *   add    r2, env, #high
-     *   and    r0, tmp, #(CPU_TLB_SIZE - 1)                    (2)
-     *   add    r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS             (3)
-     *   ldr    r0, [r2, #cmp]                                  (4)
+     *   add    r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS
+     *   ldr    r0, [r2, #cmp]
+     *   ldr    r2, [r2, #add]
+     *   movw   tmp, #page_align_mask
+     *   bic    tmp, addrlo, tmp
+     *   cmp    r0, tmp
+     *
+     * Otherwise we generate:
+     *   shr    tmp, addrlo, #TARGET_PAGE_BITS
+     *   add    r2, env, #high
+     *   and    r0, tmp, #(CPU_TLB_SIZE - 1)
+     *   add    r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS
+     *   ldr    r0, [r2, #cmp]
+     *   ldr    r2, [r2, #add]
      *   tst    addrlo, #s_mask
-     *   ldr    r2, [r2, #add]                                  (5)
      *   cmpeq  r0, tmp, lsl #TARGET_PAGE_BITS
      */
-    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
-                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+    if (use_armv7_instructions) {
+        tcg_out_extract(s, COND_AL, TCG_REG_R0, addrlo,
+                        TARGET_PAGE_BITS, CPU_TLB_BITS);
+    } else {
+        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
+                        0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
+    }
 
     /* We checked that the offset is contained within 16 bits above.  */
     if (add_off > 0xfff || (use_armv6_instructions && cmp_off > 0xff)) {
@@ -1194,9 +1209,10 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
         add_off -= cmp_off & 0xff00;
         cmp_off &= 0xff;
     }
-
-    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
-                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
+    if (!use_armv7_instructions) {
+        tcg_out_dat_imm(s, COND_AL, ARITH_AND,
+                        TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
+    }
     tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                     TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
 
@@ -1212,24 +1228,40 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
         }
     }
 
+    /* Load the tlb addend.  */
+    tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, add_off);
+
     /* Check alignment.  We don't support inline unaligned acceses,
        but we can easily support overalignment checks.  */
     if (a_bits < s_bits) {
         a_bits = s_bits;
     }
-    if (a_bits) {
-        tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, (1 << a_bits) - 1);
-    }
 
-    /* Load the tlb addend.  */
-    tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, add_off);
+    if (use_armv7_instructions) {
+        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));
+        int rot = encode_imm(mask);
 
-    tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
-                    TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+        if (rot >= 0) {
+            tcg_out_dat_imm(s, COND_AL, ARITH_BIC, TCG_REG_TMP, addrlo,
+                            rotl(mask, rot) | (rot << 7));
+        } else {
+            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
+            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
+                            addrlo, TCG_REG_TMP, 0);
+        }
+        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R0, TCG_REG_TMP, 0);
+    } else {
+        if (a_bits) {
+            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
+                            (1 << a_bits) - 1);
+        }
+        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
+                        0, TCG_REG_R0, TCG_REG_TMP,
+                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
+    }
 
     if (TARGET_LONG_BITS == 64) {
-        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
-                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
+        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R1, addrhi, 0);
     }
 
     return TCG_REG_R2;
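
Note on the index extraction: UBFX Rd, Rn, #lsb, #width extracts an
arbitrary bitfield in one instruction, so the TLB index no longer has to
be formed by a shift followed by an AND whose mask must be an encodable
ARM immediate; that AND is what limited CPU_TLB_BITS to 8 on the v6 path.
A minimal C sketch of what the two sequences compute (the constants here
are illustrative assumptions, not necessarily the values QEMU uses for a
given target):

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12          /* assumed: 4K pages */
    #define CPU_TLB_BITS     8           /* assumed index width */
    #define CPU_TLB_SIZE     (1 << CPU_TLB_BITS)

    /* armv6 path:  shr tmp, addrlo, #TARGET_PAGE_BITS
     *              and r0, tmp, #(CPU_TLB_SIZE - 1)
     * The AND mask must be an encodable ARM immediate (an 8-bit
     * value rotated by an even amount), capping CPU_TLB_BITS at 8. */
    static uint32_t tlb_index_v6(uint32_t addrlo)
    {
        uint32_t tmp = addrlo >> TARGET_PAGE_BITS;
        return tmp & (CPU_TLB_SIZE - 1);
    }

    /* armv7 path:  ubfx r0, addrlo, #TARGET_PAGE_BITS, #CPU_TLB_BITS
     * One instruction; the width field goes up to 32, so the AND
     * immediate restriction disappears. */
    static uint32_t tlb_index_v7(uint32_t addrlo)
    {
        return (addrlo >> TARGET_PAGE_BITS) & ((1u << CPU_TLB_BITS) - 1);
    }

    int main(void)
    {
        uint32_t addrlo = 0x12345678;
        printf("v6 index %#x, v7 index %#x\n",
               tlb_index_v6(addrlo), tlb_index_v7(addrlo));
        return 0;
    }

Both paths return the same index; the difference is purely in instruction
count and in which immediates the backend is allowed to encode.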
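Note on the BIC immediate: an ARM data-processing immediate is an 8-bit
value rotated right by an even amount, and the 4-bit rotate field sits at
bit 8 of the instruction, which is why the operand is built as
rotl(mask, rot) | (rot << 7) (for even rot, rot << 7 equals (rot / 2) << 8).
When no rotation works, for example a 4K page with no extra alignment bits
gives mask 0xfff, twelve consecutive ones, the code falls back to
tcg_out_movi32 (MOVW) plus a register-register BIC.  A standalone sketch of
the encodability test (find_rot is an illustrative stand-in, not QEMU's
encode_imm):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rotl32(uint32_t v, int n)
    {
        return n ? (v << n) | (v >> (32 - n)) : v;
    }

    /* Search for an even left-rotation that brings imm into 8 bits. */
    static int find_rot(uint32_t imm)
    {
        for (int rot = 0; rot < 32; rot += 2) {
            if ((rotl32(imm, rot) & ~0xffu) == 0) {
                return rot;
            }
        }
        return -1;   /* not representable as a single ALU immediate */
    }

    int main(void)
    {
        /* 0xff0: eight ones, encodable, BIC-with-immediate works.
           0xfff: twelve ones, not encodable, needs MOVW + BIC.  */
        uint32_t masks[] = { 0xff0, 0xfff };

        for (int i = 0; i < 2; i++) {
            int rot = find_rot(masks[i]);
            if (rot >= 0) {
                printf("%#x -> imm field %#x\n",
                       masks[i], rotl32(masks[i], rot) | (rot << 7));
            } else {
                printf("%#x -> movw fallback\n", masks[i]);
            }
        }
        return 0;
    }

A side effect of clearing only the bits between the alignment boundary and
the page boundary is that a misaligned address keeps its low bits set, so
the single CMP against the tlb comparator fails and the slow path is taken;
the separate TST of the v6 path becomes unnecessary.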