diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

+#define __HAVE_ARCH_ADDR_COND_PMD
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>
@@ -74,10 +75,16 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
* of the mm address space.
*/
static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+pmd_populate_kernel_at(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep,
+		       unsigned long address)
{
+ pmdval_t pmd = PMD_TYPE_TABLE | PMD_TABLE_UXN;
VM_BUG_ON(mm && mm != &init_mm);
- __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
+ if (IS_DATA_VMALLOC_ADDR(address) &&
+ IS_DATA_VMALLOC_ADDR(address + PMD_SIZE)) {
+ pmd |= PMD_TABLE_PXN;
+ }
+ __pmd_populate(pmdp, __pa(ptep), pmd);
}

static inline void

diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -34,4 +34,9 @@ static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
extern unsigned long code_region_start __ro_after_init;
extern unsigned long code_region_end __ro_after_init;

+#define IS_DATA_VMALLOC_ADDR(vaddr) (((vaddr) < code_region_start || \
+ (vaddr) > code_region_end) && \
+ ((vaddr) >= VMALLOC_START && \
+ (vaddr) < VMALLOC_END))
+
#endif /* _ASM_ARM64_VMALLOC_H */
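
To make the predicate concrete, here is a minimal standalone sketch of
the same check. The numeric values of VMALLOC_START, VMALLOC_END, and
the code-region bounds below are illustrative placeholders only; the
real values come from the kernel's virtual memory map and from the
companion patch that exports code_region_start/code_region_end:

#include <stdio.h>

/* Illustrative placeholder values, not the kernel's real layout. */
#define VMALLOC_START	0xffff800008000000ULL
#define VMALLOC_END	0xfffffbfff0000000ULL
static unsigned long long code_region_start = 0xffff800080000000ULL;
static unsigned long long code_region_end   = 0xffff8000c0000000ULL;

/* Same shape as the macro above: a vmalloc address that is NOT inside
 * the designated code region is "data" and may be mapped PXNTable. */
#define IS_DATA_VMALLOC_ADDR(vaddr) (((vaddr) < code_region_start || \
	(vaddr) > code_region_end) && \
	((vaddr) >= VMALLOC_START && \
	(vaddr) < VMALLOC_END))

int main(void)
{
	unsigned long long data_page = 0xffff800010000000ULL;
	unsigned long long code_page = 0xffff8000a0000000ULL;

	printf("data_page -> %d (expect 1: PXNTable set)\n",
	       IS_DATA_VMALLOC_ADDR(data_page));
	printf("code_page -> %d (expect 0: left executable)\n",
	       IS_DATA_VMALLOC_ADDR(code_page));
	return 0;
}

Note that the pmd helper in the pgalloc.h hunk tests both address and
address + PMD_SIZE, so a last-level table is only marked PXNTable when
the entire pmd-sized span it maps is data.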

diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -69,7 +69,7 @@ static int copy_pte(struct trans_pgd_info *info, pmd_t *dst_pmdp,
dst_ptep = trans_alloc(info);
if (!dst_ptep)
return -ENOMEM;
- pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
+ pmd_populate_kernel_at(NULL, dst_pmdp, dst_ptep, addr);
dst_ptep = pte_offset_kernel(dst_pmdp, start);

src_ptep = pte_offset_kernel(src_pmdp, start);

In an attempt to protect against write-then-execute attacks, wherein an
adversary stages malicious code into a data page and later uses a write
gadget to mark the data page executable, arm64 enforces PXNTable when
allocating pmd descriptors during the init process. However, these
protections are not maintained for dynamic memory allocations, creating
an extensive threat surface for write-then-execute attacks targeting
pages allocated through the vmalloc interface.

Straightforward modifications to the pgalloc interface allow for the
dynamic enforcement of PXNTable, restricting writable and
privileged-executable code pages to known kernel text, bpf-allocated
programs, and kprobe-allocated pages, all of which have more extensive
verification interfaces than the generic vmalloc region.

This patch adds a preprocessor define to check whether a pmd is
allocated by vmalloc and lies outside of a known code region, and if
so, marks the pmd as PXNTable, protecting over 100 last-level page
tables from manipulation in the process.

Signed-off-by: Maxwell Bland <mbland@motorola.com>
---
 arch/arm64/include/asm/pgalloc.h | 11 +++++++++--
 arch/arm64/include/asm/vmalloc.h |  5 +++++
 arch/arm64/mm/trans_pgd.c        |  2 +-
 3 files changed, 15 insertions(+), 3 deletions(-)
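
The diff opts in via __HAVE_ARCH_ADDR_COND_PMD and renames the arm64
helper to pmd_populate_kernel_at(), which implies a companion change to
<asm-generic/pgalloc.h> that is not shown here. A plausible sketch of
the generic-side fallback, under the assumption that the generic header
provides the address-taking spelling for architectures that do not opt
in, might look like this:

/* Hypothetical fallback, assuming the companion asm-generic patch:
 * without __HAVE_ARCH_ADDR_COND_PMD, the address-taking wrapper simply
 * discards the address and defers to the classic helper, so callers
 * can use the pmd_populate_kernel_at() spelling on every architecture. */
#ifndef __HAVE_ARCH_ADDR_COND_PMD
static inline void
pmd_populate_kernel_at(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep,
		       unsigned long address)
{
	pmd_populate_kernel(mm, pmdp, ptep);
}
#endif

Gating the new parameter behind an opt-in define keeps the existing
three-argument pmd_populate_kernel() signature untouched on every
architecture that does not need address-conditional pmd flags.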