
[5/8] riscv: add alignment for text, rodata and data sections

Message ID 20200217083223.2011-6-zong.li@sifive.com (mailing list archive)
State New, archived
Series Support strict kernel memory permissions for security

Commit Message

Zong Li Feb. 17, 2020, 8:32 a.m. UTC
The kernel tries to optimize its mapping by using a bigger mapping
size where it can: PMD_SIZE on rv64 and PGDIR_SIZE on rv32. To ensure
that the start addresses of these sections fit the mapping entry size,
align them to the largest such alignment.

Define a SECTION_ALIGN macro, because HPAGE_SIZE, PMD_SIZE, etc. are
not visible in the linker script.

This patch prepares for STRICT_KERNEL_RWX support.

Signed-off-by: Zong Li <zong.li@sifive.com>
---
 arch/riscv/include/asm/set_memory.h | 13 +++++++++++++
 arch/riscv/kernel/vmlinux.lds.S     |  4 +++-
 2 files changed, 16 insertions(+), 1 deletion(-)
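
For reference, the 1 << 21 and 1 << 22 constants match the superpage
sizes of the RISC-V paging schemes: with 4 KiB base pages, an Sv39 PMD
entry maps 512 pages (2 MiB) and an Sv32 PGD entry maps 1024 pages
(4 MiB). A minimal sketch of that equivalence (assuming the usual
PAGE_SHIFT of 12; not part of the patch):

#define PAGE_SHIFT 12                            /* 4 KiB base pages */
#ifdef CONFIG_64BIT
/* Sv39: one PMD entry maps 2^9 base pages -> 2 MiB superpage */
#define SECTION_ALIGN (1 << (PAGE_SHIFT + 9))    /* == 1 << 21 */
#else
/* Sv32: one PGD entry maps 2^10 base pages -> 4 MiB superpage */
#define SECTION_ALIGN (1 << (PAGE_SHIFT + 10))   /* == 1 << 22 */
#endif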

Comments

Palmer Dabbelt March 5, 2020, 12:58 a.m. UTC | #1
On Mon, 17 Feb 2020 00:32:20 PST (-0800), zong.li@sifive.com wrote:
> The kernel tries to optimize its mapping by using a bigger mapping
> size where it can: PMD_SIZE on rv64 and PGDIR_SIZE on rv32. To ensure
> that the start addresses of these sections fit the mapping entry size,
> align them to the largest such alignment.
>
> Define a SECTION_ALIGN macro, because HPAGE_SIZE, PMD_SIZE, etc. are
> not visible in the linker script.
>
> This patch prepares for STRICT_KERNEL_RWX support.
>
> Signed-off-by: Zong Li <zong.li@sifive.com>
> ---
>  arch/riscv/include/asm/set_memory.h | 13 +++++++++++++
>  arch/riscv/kernel/vmlinux.lds.S     |  4 +++-
>  2 files changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
> index a9783a878dca..a91f192063c2 100644
> --- a/arch/riscv/include/asm/set_memory.h
> +++ b/arch/riscv/include/asm/set_memory.h
> @@ -6,6 +6,7 @@
>  #ifndef _ASM_RISCV_SET_MEMORY_H
>  #define _ASM_RISCV_SET_MEMORY_H
>
> +#ifndef __ASSEMBLY__
>  /*
>   * Functions to change memory attributes.
>   */
> @@ -17,4 +18,16 @@ int set_memory_nx(unsigned long addr, int numpages);
>  int set_direct_map_invalid_noflush(struct page *page);
>  int set_direct_map_default_noflush(struct page *page);
>
> +#endif /* __ASSEMBLY__ */
> +
> +#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
> +#ifdef CONFIG_64BIT
> +#define SECTION_ALIGN (1 << 21)
> +#else
> +#define SECTION_ALIGN (1 << 22)
> +#endif
> +#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
> +#define SECTION_ALIGN L1_CACHE_BYTES
> +#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
> +
>  #endif /* _ASM_RISCV_SET_MEMORY_H */
> diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
> index 4ba8a5397e8b..0b145b9c1778 100644
> --- a/arch/riscv/kernel/vmlinux.lds.S
> +++ b/arch/riscv/kernel/vmlinux.lds.S
> @@ -37,6 +37,7 @@ SECTIONS
>  	PERCPU_SECTION(L1_CACHE_BYTES)
>  	__init_end = .;
>
> +	. = ALIGN(SECTION_ALIGN);
>  	.text : {
>  		_text = .;
>  		_stext = .;
> @@ -53,13 +54,14 @@ SECTIONS
>  	}
>
>  	/* Start of data section */
> -	RO_DATA(L1_CACHE_BYTES)
> +	RO_DATA(SECTION_ALIGN)
>  	.srodata : {
>  		*(.srodata*)
>  	}
>
>  	EXCEPTION_TABLE(0x10)
>
> +	. = ALIGN(SECTION_ALIGN);
>  	_sdata = .;
>
>  	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
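
One note on the __ASSEMBLY__ guard introduced below: vmlinux.lds.S is
fed through the C preprocessor, so plain macros such as SECTION_ALIGN
can be shared with the linker script, while C prototypes must be kept
out of its sight. A minimal sketch of the pattern (illustrative, not
the patch itself):

/* Visible only when compiled as C */
#ifndef __ASSEMBLY__
int set_memory_ro(unsigned long addr, int numpages);
#endif /* __ASSEMBLY__ */

/* Plain macro: usable from both C and the preprocessed linker script */
#define SECTION_ALIGN (1 << 21)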

Patch

diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index a9783a878dca..a91f192063c2 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -6,6 +6,7 @@
 #ifndef _ASM_RISCV_SET_MEMORY_H
 #define _ASM_RISCV_SET_MEMORY_H
 
+#ifndef __ASSEMBLY__
 /*
  * Functions to change memory attributes.
  */
@@ -17,4 +18,16 @@ int set_memory_nx(unsigned long addr, int numpages);
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_64BIT
+#define SECTION_ALIGN (1 << 21)
+#else
+#define SECTION_ALIGN (1 << 22)
+#endif
+#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#define SECTION_ALIGN L1_CACHE_BYTES
+#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+
 #endif /* _ASM_RISCV_SET_MEMORY_H */
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 4ba8a5397e8b..0b145b9c1778 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -37,6 +37,7 @@ SECTIONS
 	PERCPU_SECTION(L1_CACHE_BYTES)
 	__init_end = .;
 
+	. = ALIGN(SECTION_ALIGN);
 	.text : {
 		_text = .;
 		_stext = .;
@@ -53,13 +54,14 @@ }
 	}
 
 	/* Start of data section */
-	RO_DATA(L1_CACHE_BYTES)
+	RO_DATA(SECTION_ALIGN)
 	.srodata : {
 		*(.srodata*)
 	}
 
 	EXCEPTION_TABLE(0x10)
 
+	. = ALIGN(SECTION_ALIGN);
 	_sdata = .;
 
 	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
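
With .text, .rodata and .data aligned this way, a later patch in the
series can change permissions on whole superpage mappings instead of
splitting them. A hypothetical sketch of such a caller (the helper
name is illustrative; the section symbols and the set_memory_ro()
prototype are the ones from this patch's context):

extern char _text[], _etext[];

/* Hypothetical: mark kernel text read-only. Because _text is
 * SECTION_ALIGN-aligned, the range covers whole 2 MiB (rv64) or
 * 4 MiB (rv32) mappings. */
static void protect_kernel_text(void)
{
	unsigned long start = (unsigned long)_text;
	int numpages = ((unsigned long)_etext - start) >> PAGE_SHIFT;

	set_memory_ro(start, numpages);
}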