@@ -1422,6 +1422,17 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
+config ARM64_WXN
+ bool "Enable WXN attribute so all writable mappings are non-exec"
+ help
+ Set the WXN bit in the SCTLR system register so that all writable
+ mappings are treated as if the PXN/UXN bit is set as well.
+ If this is set to Y, it can still be disabled at runtime by
+ passing 'arm64.nowxn' on the kernel command line.
+
+ This should only be enabled if none of the software that needs to be
+ supported relies on being able to execute from writable mappings.
+
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
@@ -915,6 +915,15 @@ extern struct arm64_ftr_override id_aa64isar2_override;
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
+extern int arm64_no_wxn;
+
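+/* True when the WXN SCTLR bit is (or will be) set, i.e. no 'arm64.nowxn' override */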
+static inline bool arm64_wxn_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_ARM64_WXN))
+ return false;
+ return arm64_no_wxn == 0;
+}
+
#endif /* __ASSEMBLY__ */
#endif
@@ -35,11 +35,40 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
}
#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+static inline bool arm64_check_wx_prot(unsigned long prot,
+ struct task_struct *tsk)
+{
+ /*
+ * When we are running with SCTLR_ELx.WXN==1, writable mappings are
+ * implicitly non-executable. This means we should reject such mappings
+ * when user space attempts to create them using mmap() or mprotect().
+ */
+ if (arm64_wxn_enabled() &&
+ ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC))) {
+ /*
+ * User space libraries such as libffi carry elaborate
+ * heuristics to decide whether it is worth it to even attempt
+ * to create writable executable mappings, as PaX or SELinux
+ * enabled systems will outright reject it. They will usually
+ * fall back to something else (e.g., two separate shared
+ * mmap()s of a temporary file) on failure.
+ */
+ pr_info_ratelimited(
+ "process %s (%d) attempted to create PROT_WRITE+PROT_EXEC mapping\n",
+ tsk->comm, tsk->pid);
+ return false;
+ }
+ return true;
+}
+
static inline bool arch_validate_prot(unsigned long prot,
unsigned long addr __always_unused)
{
unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;
+ if (!arm64_check_wx_prot(prot, current))
+ return false;
+
if (system_supports_bti())
supported |= PROT_BTI;
@@ -50,6 +79,13 @@ static inline bool arch_validate_prot(unsigned long prot,
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
+static inline bool arch_validate_mmap_prot(unsigned long prot,
+ unsigned long addr)
+{
+ return arm64_check_wx_prot(prot, current);
+}
+#define arch_validate_mmap_prot arch_validate_mmap_prot
+
static inline bool arch_validate_flags(unsigned long vm_flags)
{
if (!system_supports_mte())
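The checks above are exactly what the libffi-style probing mentioned in the comment runs into: a PROT_WRITE|PROT_EXEC request now fails at mmap()/mprotect() time instead of faulting later. A minimal user-space sketch of such a probe (illustrative only, not part of this patch):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Probe whether the kernel still permits writable+executable anon mappings. */
static bool wx_mappings_allowed(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return false;	/* e.g. rejected by arm64_check_wx_prot() */

	munmap(p, 4096);
	return true;
}

A JIT that sees this probe fail would fall back to the scheme referred to in the comment, such as a writable and a separate executable mapping of the same temporary file.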
@@ -19,13 +19,41 @@
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
extern bool rodata_full;
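+
+/*
+ * Open-coded no-op versions of the generic mm hooks: asm-generic/mm_hooks.h
+ * can no longer be included here, since arch_vma_access_permitted() is now
+ * defined locally below.
+ */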
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+ bool write, bool execute, bool foreign)
+{
+ if (IS_ENABLED(CONFIG_ARM64_WXN) && execute &&
+ (vma->vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
+ pr_warn_ratelimited(
+ "process %s (%d) attempted to execute from writable memory\n",
+ current->comm, current->pid);
+ /* disallow unless the nowxn override is set */
+ return !arm64_wxn_enabled();
+ }
+ return true;
+}
+
static inline void contextidr_thread_switch(struct task_struct *next)
{
if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
@@ -278,15 +278,24 @@ static bool arm64_early_this_cpu_has_e0pd(void)
ID_AA64MMFR2_E0PD_SHIFT);
}
+extern void disable_wxn(void);	/* provided by proc.S as __pi_disable_wxn */
+
static void map_kernel(void *fdt, u64 kaslr_offset, u64 va_offset)
{
pgd_t *pgdp = (void *)init_pg_dir + PAGE_SIZE;
pgprot_t text_prot = PAGE_KERNEL_ROX;
pgprot_t data_prot = PAGE_KERNEL;
pgprot_t prot;
+ bool nowxn = false;
- if (cmdline_has(fdt, "rodata=off"))
+ if (cmdline_has(fdt, "rodata=off")) {
text_prot = PAGE_KERNEL_EXEC;
+ nowxn = true;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_WXN) &&
+ (nowxn || cmdline_has(fdt, "arm64.nowxn")))
+ disable_wxn();
// If we have a CPU that supports BTI and a kernel built for
// BTI then mark the kernel executable text as guarded pages
@@ -73,6 +73,21 @@ long __section(".mmuoff.data.write") __early_cpu_boot_status;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_ARM64_WXN
+asmlinkage int arm64_no_wxn __ro_after_init;
+
+static int set_arm64_no_wxn(char *str)
+{
+ arm64_no_wxn = 1;
+
+ // Clean to the PoC so booting secondaries, which read this with
+ // the MMU and caches off in __cpu_setup(), observe the update
+ dcache_clean_inval_poc((u64)&arm64_no_wxn,
+ (u64)&arm64_no_wxn + sizeof(arm64_no_wxn));
+ return 1;
+}
+__setup("arm64.nowxn", set_arm64_no_wxn);
+#endif
+
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
@@ -660,15 +675,19 @@ static int __init parse_rodata(char *arg)
int ret = strtobool(arg, &rodata_enabled);
if (!ret) {
rodata_full = false;
- return 0;
- }
+ } else {
+ /* permit 'full' in addition to boolean options */
+ if (strcmp(arg, "full"))
+ return -EINVAL;
- /* permit 'full' in addition to boolean options */
- if (strcmp(arg, "full"))
- return -EINVAL;
+ rodata_enabled = true;
+ rodata_full = true;
+ }
- rodata_enabled = true;
- rodata_full = true;
+#ifdef CONFIG_ARM64_WXN
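+ /*
+ * With rodata=off, the kernel text is mapped writable and executable,
+ * and setting SCTLR_ELx.WXN would make it non-executable, so force
+ * the nowxn override as well.
+ */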
+ if (!rodata_enabled)
+ set_arm64_no_wxn(NULL);
+#endif
return 0;
}
early_param("rodata", parse_rodata);
@@ -498,8 +498,31 @@ SYM_FUNC_START(__cpu_setup)
* Prepare SCTLR
*/
mov_q x0, INIT_SCTLR_EL1_MMU_ON
+#ifdef CONFIG_ARM64_WXN
+ ldr_l w1, arm64_no_wxn, x1 // load the early 'arm64.nowxn' flag
+ tst w1, #0x1 // WXN disabled on command line?
+ orr x1, x0, #SCTLR_ELx_WXN // x1 = SCTLR value with WXN set
+ csel x0, x0, x1, ne // keep x0 if nowxn was given, else use x1
+#endif
ret // return to head.S
.unreq mair
.unreq tcr
SYM_FUNC_END(__cpu_setup)
+
+#ifdef CONFIG_ARM64_WXN
+ .align 2
+SYM_CODE_START(__pi_disable_wxn)
+ mrs x0, sctlr_el1 // x0 = current SCTLR_EL1 value
+ bic x1, x0, #SCTLR_ELx_M
+ msr sctlr_el1, x1 // switch the MMU off
+ isb
+ tlbi vmalle1 // drop TLB entries created while WXN was set
+ dsb nsh
+ isb
+ bic x0, x0, #SCTLR_ELx_WXN
+ msr sctlr_el1, x0 // restore SCTLR with WXN cleared
+ isb
+ ret
+SYM_CODE_END(__pi_disable_wxn)
+#endif
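For readers less used to the csel idiom, the WXN handling added to __cpu_setup() above amounts to the following C (an illustrative rendering only; the real code runs in assembly and hands the value back in x0 for head.S to write into SCTLR_EL1):

	u64 sctlr = INIT_SCTLR_EL1_MMU_ON;

	/* enforce WXN unless the arm64.nowxn override was recorded */
	if (IS_ENABLED(CONFIG_ARM64_WXN) && !(arm64_no_wxn & 1))
		sctlr |= SCTLR_ELx_WXN;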