From patchwork Mon Aug 4 08:46:46 2014
X-Patchwork-Submitter: Christoffer Dall
X-Patchwork-Id: 4668431
From: Christoffer Dall
To: Paolo Bonzini, Gleb Natapov
Subject: [GIT PULL 29/51] ARM: KVM: handle 64bit values passed to mrcc or from mcrr instructions in BE case
Date: Mon, 4 Aug 2014 10:46:46 +0200
Message-Id: <1407142028-31105-30-git-send-email-christoffer.dall@linaro.org>
X-Mailer: git-send-email 2.0.0
In-Reply-To: <1407142028-31105-1-git-send-email-christoffer.dall@linaro.org>
References: <1407142028-31105-1-git-send-email-christoffer.dall@linaro.org>
Cc: Marc Zyngier, Victor Kamensky, kvmarm@lists.cs.columbia.edu,
 linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org

From: Victor Kamensky

In some cases the mcrr and mrrc instructions, in combination with the ldrd
and strd instructions, need to deal with a 64-bit value in memory. The ldrd
and strd instructions already handle endianness within word (register)
boundaries, but for the whole 64-bit value to be represented correctly, the
rr_lo_hi macro is introduced and used to swap the register positions when
the mcrr and mrrc instructions are used. This has the effect of swapping the
two words.

Signed-off-by: Victor Kamensky
Reviewed-by: Christoffer Dall
Acked-by: Marc Zyngier
Signed-off-by: Marc Zyngier
---
 arch/arm/include/asm/kvm_asm.h | 18 ++++++++++++++++++
 arch/arm/kvm/init.S            |  4 ++--
 arch/arm/kvm/interrupts.S      |  4 ++--
 arch/arm/kvm/interrupts_head.S |  6 +++---
 4 files changed, 25 insertions(+), 7 deletions(-)
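An illustrative aside on the commit message above: the following stand-alone
user-space sketch (not part of the patch; the example value and variable
names are made up for illustration) shows why the register order has to be
swapped on big-endian. ldrd/strd move the word at the lower address into/out
of the first register of the pair, while mcrr/mrrc always treat their first
register operand as the low 32 bits of the 64-bit value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Example 64-bit value, as it might sit in a VTTBR-style field. */
	uint64_t val = 0x1122334455667788ULL;
	uint32_t words[2];

	/* Inspect the raw in-memory word order of the 64-bit value. */
	memcpy(words, &val, sizeof(val));

	/*
	 * On a little-endian CPU, words[0] (the lower address) holds the low
	 * half (0x55667788); on a big-endian CPU it holds the high half
	 * (0x11223344).  ldrd r2, r3, [addr] puts words[0] into r2, so the
	 * mcrr operands must be "r2, r3" on LE but "r3, r2" on BE, which is
	 * exactly what rr_lo_hi(r2, r3) expands to.
	 */
	printf("word at lower address : 0x%08x\n", (unsigned int)words[0]);
	printf("word at higher address: 0x%08x\n", (unsigned int)words[1]);
	return 0;
}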
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 53b3c4a..3a67bec 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,6 +61,24 @@
 #define ARM_EXCEPTION_FIQ	  6
 #define ARM_EXCEPTION_HVC	  7
 
+/*
+ * The rr_lo_hi macro swaps a pair of registers depending on
+ * current endianness. It is used in conjunction with ldrd and strd
+ * instructions that load/store a 64-bit value from/to memory to/from
+ * a pair of registers which are used with the mrrc and mcrr instructions.
+ * If used with the ldrd/strd instructions, the a1 parameter is the first
+ * source/destination register and the a2 parameter is the second
+ * source/destination register. Note that the ldrd/strd instructions
+ * already swap the bytes within the words correctly according to the
+ * endianness setting, but the order of the registers need to be effectively
+ * swapped when used with the mrrc/mcrr instructions.
+ */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define rr_lo_hi(a1, a2) a2, a1
+#else
+#define rr_lo_hi(a1, a2) a1, a2
+#endif
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1b9844d..2cc14df 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -71,7 +71,7 @@ __do_hyp_init:
 	bne	phase2			@ Yes, second stage init
 
 	@ Set the HTTBR to point to the hypervisor PGD pointer passed
-	mcrr	p15, 4, r2, r3, c2
+	mcrr	p15, 4, rr_lo_hi(r2, r3), c2
 
 	@ Set the HTCR and VTCR to the same shareability and cacheability
 	@ settings as the non-secure TTBCR and with T0SZ == 0.
@@ -137,7 +137,7 @@ phase2:
 	mov	pc, r0
 
 target:	@ We're now in the trampoline code, switch page tables
-	mcrr	p15, 4, r2, r3, c2
+	mcrr	p15, 4, rr_lo_hi(r2, r3), c2
 	isb
 
 	@ Invalidate the old TLBs
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 0d68d40..24d4e65 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -52,7 +52,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
 	dsb	ishst
 	add	r0, r0, #KVM_VTTBR
 	ldrd	r2, r3, [r0]
-	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
+	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
 	isb
 	mcr	p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
 	dsb	ish
@@ -135,7 +135,7 @@ ENTRY(__kvm_vcpu_run)
 	ldr	r1, [vcpu, #VCPU_KVM]
 	add	r1, r1, #KVM_VTTBR
 	ldrd	r2, r3, [r1]
-	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
+	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
 
 	@ We're all done, just restore the GPRs and go to the guest
 	restore_guest_regs
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 68d99c6..98c8c5b 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -520,7 +520,7 @@ ARM_BE8(rev	r6, r6  )
 	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	isb
 
-	mrrc	p15, 3, r2, r3, c14	@ CNTV_CVAL
+	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
 	ldr	r4, =VCPU_TIMER_CNTV_CVAL
 	add	r5, vcpu, r4
 	strd	r2, r3, [r5]
@@ -560,12 +560,12 @@ ARM_BE8(rev	r6, r6  )
 
 	ldr	r2, [r4, #KVM_TIMER_CNTVOFF]
 	ldr	r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
-	mcrr	p15, 4, r2, r3, c14	@ CNTVOFF
+	mcrr	p15, 4, rr_lo_hi(r2, r3), c14	@ CNTVOFF
 
 	ldr	r4, =VCPU_TIMER_CNTV_CVAL
 	add	r5, vcpu, r4
 	ldrd	r2, r3, [r5]
-	mcrr	p15, 3, r2, r3, c14	@ CNTV_CVAL
+	mcrr	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
 	isb
 
 	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]