From patchwork Wed Feb 15 15:37:59 2017 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 9574309 Return-Path: Received: from mail.wl.linuxfoundation.org (pdx-wl-mail.web.codeaurora.org [172.30.200.125]) by pdx-korg-patchwork.web.codeaurora.org (Postfix) with ESMTP id 2DE036045F for ; Wed, 15 Feb 2017 15:39:41 +0000 (UTC) Received: from mail.wl.linuxfoundation.org (localhost [127.0.0.1]) by mail.wl.linuxfoundation.org (Postfix) with ESMTP id 1E4E527E5A for ; Wed, 15 Feb 2017 15:39:41 +0000 (UTC) Received: by mail.wl.linuxfoundation.org (Postfix, from userid 486) id 11932284EE; Wed, 15 Feb 2017 15:39:41 +0000 (UTC) X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on pdx-wl-mail.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-4.1 required=2.0 tests=BAYES_00,DKIM_SIGNED, RCVD_IN_DNSWL_MED,T_DKIM_INVALID autolearn=ham version=3.3.1 Received: from mother.openwall.net (mother.openwall.net [195.42.179.200]) by mail.wl.linuxfoundation.org (Postfix) with SMTP id E80F827E5A for ; Wed, 15 Feb 2017 15:39:39 +0000 (UTC) Received: (qmail 7257 invoked by uid 550); 15 Feb 2017 15:39:38 -0000 Mailing-List: contact kernel-hardening-help@lists.openwall.com; run by ezmlm Precedence: bulk List-Post: List-Help: List-Unsubscribe: List-Subscribe: List-ID: Delivered-To: mailing list kernel-hardening@lists.openwall.com Received: (qmail 5937 invoked from network); 15 Feb 2017 15:39:35 -0000 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linaro.org; s=google; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=vSOSmjfy6z6siPwLdkQph71orWPhjD1VVy2WULSmZlI=; b=jdLN1EPwtt4RUAg4rHEWIZAMb12oLFME2tL0li9BJcN8p7tkbYp239FyLyW6/+P6Ox oiXiiQs+rVDx9ZzD1aCHmRxFzHNd54fNrUP53C4In2fJV8mVEoYdAO1IWYgXlkK4cnbV 2BcWkr8R3FfIIxVG8saUkCrSttuXnDHVRcSo4= X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; 
h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=vSOSmjfy6z6siPwLdkQph71orWPhjD1VVy2WULSmZlI=; b=N1hlypKpvsLJ7uiIiqtXigdur/xHT2XGosgjoy9D9emoenxhL0+QkSG/pAFr1LBrk3 rbKudfITPofxSqcKd5YLMUKK03SrXvSHU3OcpD71crTVzYbJVwZ812DND69VA7CxXENa IBBbG8lKSmXOd/bwmRh2XGRuols21A8crDGAAl6ruetq8EQkBrM1H2CldL5Fr1yCGc0Y 82km1IvHrxgH6DmhHEnk1b6DJ1fJwUE1KDNAV2J03KqmdiJXBz2AdiTWA/Fp+yrGMQem buWE6wxyFSHCzyFxCHkc/XlQMZ1z9U2rp6R8v4mKDgi7RzetpSW5FujAuJAsasRZV7PL D3eA== X-Gm-Message-State: AMke39miUc9pKZbRd39aypMfTTpQfq/baekdpz5HmmtdvGubynE9W5Hfw1/c5O8As3agy2f6 X-Received: by 10.223.160.114 with SMTP id l47mr29134831wrl.73.1487173163968; Wed, 15 Feb 2017 07:39:23 -0800 (PST) From: Ard Biesheuvel To: linux-arm-kernel@lists.infradead.org, mark.rutland@arm.com, will.deacon@arm.com, catalin.marinas@arm.com, keescook@chromium.org, labbott@fedoraproject.org, james.morse@arm.com Cc: kernel-hardening@lists.openwall.com, Ard Biesheuvel Date: Wed, 15 Feb 2017 15:37:59 +0000 Message-Id: <1487173081-13425-2-git-send-email-ard.biesheuvel@linaro.org> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1487173081-13425-1-git-send-email-ard.biesheuvel@linaro.org> References: <1487173081-13425-1-git-send-email-ard.biesheuvel@linaro.org> Subject: [kernel-hardening] [RFC PATCH 1/3] arm64: mmu: restrict permissions of early kernel mappings X-Virus-Scanned: ClamAV using ClamSMTP Restrict the permissions of the early kernel mappings as much as possible, by - making the ID map read-only, - making the virtual kernel mapping non-executable initially, and fixing up the permissions after relocation processing has occurred, - making the kernel text and as much of .rodata as possible read-only, this is limited by the presence of the __ro_after_init section, which should remain writable throughout the entire __init sequence, - making the .init.text section read-only if its placement allows it. 
The latter condition is based on a couple of parameters, i.e., the page size, the swapper block size and the actual physical placement of the kernel Image. On 16k and 64k pagesize kernels and 4k pagesize kernels with CONFIG_DEBUG_ALIGN_RODATA=y and CONFIG_RELOCATABLE=y, this condition is guaranteed to be met. In other cases, it depends on the placement of the kernel and the sizes of the various sections: this will be taken advantage of in a subsequent patch. In the meantime, we can put some space between the end of the init code section and the writable init data section by moving the .rela section into inittext. Signed-off-by: Ard Biesheuvel --- arch/arm64/include/asm/kernel-pgtable.h | 3 ++ arch/arm64/kernel/head.S | 53 +++++++++++++++++++- arch/arm64/kernel/vmlinux.lds.S | 14 +++--- 3 files changed, 62 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 7803343e5881..cd543e51e6e8 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -87,6 +87,9 @@ #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) #endif +#define SWAPPER_MM_MMUFLAGS_RW (SWAPPER_MM_MMUFLAGS | PTE_PXN | PTE_UXN) +#define SWAPPER_MM_MMUFLAGS_RX (SWAPPER_MM_MMUFLAGS | PTE_RDONLY) + /* * To make optimal use of block mappings when laying out the linear * mapping, round down the base of physical memory to a size that can diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 4fb6ccd886d1..9ea0286c33d5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -386,7 +386,7 @@ __create_page_tables: cmp x0, x6 b.lo 1b - mov x7, SWAPPER_MM_MMUFLAGS + mov x7, SWAPPER_MM_MMUFLAGS_RX /* * Create the identity mapping. 
@@ -438,6 +438,8 @@ __create_page_tables: adr_l x6, __idmap_text_end // __pa(__idmap_text_end) create_block_map x0, x7, x3, x5, x6 + mov_q x7, SWAPPER_MM_MMUFLAGS_RW + /* * Map the kernel image (starting with PHYS_OFFSET). */ @@ -873,6 +875,7 @@ __primary_switch: #ifdef CONFIG_RELOCATABLE bl __relocate_kernel #ifdef CONFIG_RANDOMIZE_BASE + bl __update_page_permissions ldr x8, =__primary_switched adrp x0, __PHYS_OFFSET blr x8 @@ -898,7 +901,55 @@ __primary_switch: bl __relocate_kernel #endif #endif + bl __update_page_permissions ldr x8, =__primary_switched adrp x0, __PHYS_OFFSET br x8 ENDPROC(__primary_switch) + +__update_page_permissions: + ldr x0, =swapper_pg_dir + (SWAPPER_PGTABLE_LEVELS - 1) * PAGE_SIZE + + /* + * Remap the kernel text (and as much of rodata as we can, but without + * covering the __ro_after_init section) with R-X permissions + */ + mov x7, SWAPPER_MM_MMUFLAGS_RX + ldr x5, =_text + adrp x3, _text + ldr x6, =__start_data_ro_after_init - SWAPPER_BLOCK_SIZE + create_block_map x0, x7, x3, x5, x6 + + /* + * Remap .init.text with R-X permissions, unless the swapper block that + * covers it intersects with adjacent writable regions. In that case, + * there is no way around using RWX permissions for this region. + */ + ldr x5, =__inittext_begin + adrp x3, __inittext_begin + ldr x6, =_einittext - 1 + + /* + * Whether we must use RWX permissions depends on the swapper block + * size, the page size, the segment alignment and possibly on the + * runtime physical offset of the kernel image modulo the swapper + * block size, in which case we can only decide this at runtime. 
+ */ +#if SWAPPER_BLOCK_SIZE > PAGE_SIZE + ldr x8, =__end_data_ro_after_init + ldr x9, =__initdata_begin + bic x5, x5, #SWAPPER_BLOCK_SIZE - 1 + bic x9, x9, #SWAPPER_BLOCK_SIZE - 1 + + cmp x5, x8 + ccmp x9, x6, #1, ge + mov x8, SWAPPER_MM_MMUFLAGS // RWX permissions + csel x7, x7, x8, ge +#endif + create_block_map x0, x7, x3, x5, x6 + + tlbi vmalle1 // Remove any stale TLB entries + dsb nsh + + ret +ENDPROC(__update_page_permissions) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 2c93d259046c..6778f478fdee 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -160,6 +160,13 @@ SECTIONS *(.altinstr_replacement) } + .rela : ALIGN(8) { + *(.rela .rela*) + } + + __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR); + __rela_size = SIZEOF(.rela); + . = ALIGN(PAGE_SIZE); __inittext_end = .; __initdata_begin = .; @@ -179,13 +186,6 @@ SECTIONS PERCPU_SECTION(L1_CACHE_BYTES) - .rela : ALIGN(8) { - *(.rela .rela*) - } - - __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR); - __rela_size = SIZEOF(.rela); - . = ALIGN(SEGMENT_ALIGN); __initdata_end = .; __init_end = .;