From patchwork Wed Dec  6 14:38:29 2017
X-Patchwork-Submitter: Marc Zyngier
X-Patchwork-Id: 10096021
From: Marc Zyngier <marc.zyngier@arm.com>
To: linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org,
	kvmarm@lists.cs.columbia.edu
Cc: Christoffer Dall, Mark Rutland, Catalin Marinas, Will Deacon,
	James Morse
Subject: [PATCH 08/18] arm64: KVM: Dynamically patch the kernel/hyp VA mask
Date: Wed,  6 Dec 2017 14:38:29 +0000
Message-Id: <20171206143839.29223-9-marc.zyngier@arm.com>
X-Mailer: git-send-email 2.14.2
In-Reply-To: <20171206143839.29223-1-marc.zyngier@arm.com>
References: <20171206143839.29223-1-marc.zyngier@arm.com>

So far, we're using a complicated sequence of alternatives to patch the
kernel/hyp VA mask on non-VHE, and NOP out the masking altogether when
on VHE.

The newly introduced dynamic patching gives us the opportunity to
simplify that code by patching a single instruction with the correct
mask (instead of the mind-bending cumulative masking we have at the
moment), or even a single NOP on VHE.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/kvm_mmu.h | 42 ++++++-----------------
 arch/arm64/kvm/Makefile          |  2 +-
 arch/arm64/kvm/haslr.c           | 74 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 85 insertions(+), 33 deletions(-)
 create mode 100644 arch/arm64/kvm/haslr.c

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2..accff489aa22 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -69,9 +69,6 @@
  * mappings, and none of this applies in that case.
  */
 
-#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
-#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
-
 #ifdef __ASSEMBLY__
 
 #include <asm/alternative.h>
@@ -81,27 +78,13 @@
  * Convert a kernel VA into a HYP VA.
  * reg: VA to be converted.
  *
- * This generates the following sequences:
- * - High mask:
- *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
- *		nop
- * - Low mask:
- *		and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
- *		and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
- * - VHE:
- *		nop
- *		nop
- *
- * The "low mask" version works because the mask is a strict subset of
- * the "high mask", hence performing the first mask for nothing.
- * Should be completely invisible on any viable CPU.
+ * The actual code generation takes place in kvm_update_va_mask, and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation.
  */
 .macro kern_hyp_va	reg
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	and     \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
-alternative_else_nop_endif
-alternative_if ARM64_HYP_OFFSET_LOW
-	and     \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
+alternative_cb kvm_update_va_mask
+	and     \reg, \reg, #1
 alternative_else_nop_endif
 .endm
@@ -113,18 +96,13 @@ alternative_else_nop_endif
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 
+u32 kvm_update_va_mask(struct alt_instr *alt, int index, u32 oinsn);
+
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
-	asm volatile(ALTERNATIVE("and %0, %0, %1",
-				 "nop",
-				 ARM64_HAS_VIRT_HOST_EXTN)
-		     : "+r" (v)
-		     : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
-	asm volatile(ALTERNATIVE("nop",
-				 "and %0, %0, %1",
-				 ARM64_HYP_OFFSET_LOW)
-		     : "+r" (v)
-		     : "i" (HYP_PAGE_OFFSET_LOW_MASK));
+	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n",
+				    kvm_update_va_mask)
+		     : "+r" (v));
 	return v;
 }
 
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 87c4f7ae24de..baba030ee29e 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -16,7 +16,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/e
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
 
-kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
+kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o haslr.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
 kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o
diff --git a/arch/arm64/kvm/haslr.c b/arch/arm64/kvm/haslr.c
new file mode 100644
index 000000000000..c21ab93a9ad9
--- /dev/null
+++ b/arch/arm64/kvm/haslr.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/alternative.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
+#include <asm/kvm_mmu.h>
+
+#define HYP_PAGE_OFFSET_HIGH_MASK	((UL(1) << VA_BITS) - 1)
+#define HYP_PAGE_OFFSET_LOW_MASK	((UL(1) << (VA_BITS - 1)) - 1)
+
+static unsigned long get_hyp_va_mask(void)
+{
+	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
+	unsigned long mask = HYP_PAGE_OFFSET_LOW_MASK;
+
+	/*
+	 * Activate the lower HYP offset only if the idmap doesn't
+	 * clash with it.
+	 */
+	if (idmap_addr > HYP_PAGE_OFFSET_LOW_MASK)
+		mask = HYP_PAGE_OFFSET_HIGH_MASK;
+
+	return mask;
+}
+
+u32 kvm_update_va_mask(struct alt_instr *alt, int index, u32 oinsn)
+{
+	u32 rd, rn, insn;
+	u64 imm;
+
+	/* We only expect a single instruction sequence */
+	BUG_ON((alt->alt_len / sizeof(insn)) != 1);
+
+	/* VHE doesn't need any address translation, let's NOP everything */
+	if (has_vhe())
+		return aarch64_insn_gen_nop();
+
+	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+	rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);
+
+	switch (index) {
+	default:
+		/* Something went wrong... */
+		insn = AARCH64_BREAK_FAULT;
+		break;
+
+	case 0:
+		imm = get_hyp_va_mask();
+		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
+							  AARCH64_INSN_VARIANT_64BIT,
+							  rn, rd, imm);
+		break;
+	}
+
+	BUG_ON(insn == AARCH64_BREAK_FAULT);
+
+	return insn;
+}
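
A note for readers new to this area: after this patch, the HYP VA is
derived from the kernel VA by nothing more than a single AND against a
mask chosen at boot (or a NOP on VHE). The following standalone
userspace sketch, which is not part of the patch, shows how the two
masks derive from VA_BITS and what the patched instruction ends up
computing. VA_BITS, the sample kernel VA and the sample idmap address
are made-up values for illustration only, and this get_hyp_va_mask()
takes the idmap address as a parameter instead of reading
__hyp_idmap_text_start.

  #include <stdio.h>
  #include <stdint.h>

  /* Assumed 48-bit VA configuration; in the kernel this is a build-time choice. */
  #define VA_BITS 48

  #define HYP_PAGE_OFFSET_HIGH_MASK	((1ULL << VA_BITS) - 1)
  #define HYP_PAGE_OFFSET_LOW_MASK	((1ULL << (VA_BITS - 1)) - 1)

  /*
   * Mirrors the patch's get_hyp_va_mask(): use the lower HYP offset
   * unless the idmap would clash with it.
   */
  static uint64_t get_hyp_va_mask(uint64_t idmap_addr)
  {
  	if (idmap_addr > HYP_PAGE_OFFSET_LOW_MASK)
  		return HYP_PAGE_OFFSET_HIGH_MASK;
  	return HYP_PAGE_OFFSET_LOW_MASK;
  }

  int main(void)
  {
  	uint64_t kern_va  = 0xffff000012345678ULL;	/* sample kernel VA */
  	uint64_t idmap_pa = 0x80000000ULL;		/* sample idmap PA */
  	uint64_t mask = get_hyp_va_mask(idmap_pa);

  	/* What the single patched instruction does: and xN, xN, #mask */
  	printf("mask    = 0x%016llx\n", (unsigned long long)mask);
  	printf("kern VA = 0x%016llx\n", (unsigned long long)kern_va);
  	printf("hyp VA  = 0x%016llx\n", (unsigned long long)(kern_va & mask));
  	return 0;
  }

With these sample values the idmap sits below the low mask, so the low
mask is selected and the kernel VA above maps to the HYP VA
0x0000000012345678.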