| Message ID | 20231124163510.1835740-10-joey.gouly@arm.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Permission Overlay Extension |
On Fri, Nov 24, 2023 at 04:34:54PM +0000, Joey Gouly wrote:
>  arch/arm64/include/asm/mman.h   |  8 +++++++-
>  arch/arm64/include/asm/page.h   | 10 ++++++++++
>  arch/arm64/mm/mmap.c            |  9 +++++++++
>  arch/powerpc/include/asm/page.h | 11 +++++++++++
>  arch/x86/include/asm/page.h     | 10 ++++++++++
>  fs/proc/task_mmu.c              |  2 ++
>  include/linux/mm.h              | 13 -------------
>  7 files changed, 49 insertions(+), 14 deletions(-)

It might be worth splitting out the powerpc/x86/generic parts into a
separate patch.

> diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
> index 5966ee4a6154..ecb2d18dc4d7 100644
> --- a/arch/arm64/include/asm/mman.h
> +++ b/arch/arm64/include/asm/mman.h
> @@ -7,7 +7,7 @@
>  #include <uapi/asm/mman.h>
>
>  static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
> -	unsigned long pkey __always_unused)
> +	unsigned long pkey)
>  {
>  	unsigned long ret = 0;
>
> @@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
>  	if (system_supports_mte() && (prot & PROT_MTE))
>  		ret |= VM_MTE;
>
> +#if defined(CONFIG_ARCH_HAS_PKEYS)
> +	ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0;
> +	ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0;
> +	ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0;
> +#endif

Is there anywhere that rejects pkey & 8 on arm64? Because with 128-bit
PTEs, if we ever support them, we can have 4-bit pkeys.

>  #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)

> diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
> index 2312e6ee595f..aabfda2516d2 100644
> --- a/arch/arm64/include/asm/page.h
> +++ b/arch/arm64/include/asm/page.h
> @@ -49,6 +49,16 @@ int pfn_is_map_memory(unsigned long pfn);
>
>  #define VM_DATA_DEFAULT_FLAGS	(VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
>
> +#if defined(CONFIG_ARCH_HAS_PKEYS)
> +/* A protection key is a 3-bit value */
> +# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_2
> +# define VM_PKEY_BIT0	VM_HIGH_ARCH_2
> +# define VM_PKEY_BIT1	VM_HIGH_ARCH_3
> +# define VM_PKEY_BIT2	VM_HIGH_ARCH_4
> +# define VM_PKEY_BIT3	0
> +# define VM_PKEY_BIT4	0
> +#endif

I think we should start from VM_HIGH_ARCH_BIT_0 and just move the
VM_MTE, VM_MTE_ALLOWED to VM_HIGH_ARCH_BIT_{4,5}.
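For illustration, that relocation might look like the sketch below. This is
hypothetical, not part of the patch; it assumes VM_MTE and VM_MTE_ALLOWED
(currently VM_HIGH_ARCH_0 and VM_HIGH_ARCH_1) can simply be reassigned under
their existing CONFIG_ARM64_MTE guard:

/*
 * Sketch of the suggested layout: pkeys start at VM_HIGH_ARCH_BIT_0 as on
 * x86/powerpc, keeping VM_HIGH_ARCH_3 free for a fourth pkey bit should
 * 128-bit PTEs arrive, with the MTE flags moved out of the way.
 */
#if defined(CONFIG_ARCH_HAS_PKEYS)
/* A protection key is a 3-bit value */
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	0	/* could grow into VM_HIGH_ARCH_3 */
# define VM_PKEY_BIT4	0
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_4	/* was VM_HIGH_ARCH_0 */
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_5	/* was VM_HIGH_ARCH_1 */
#endif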
> diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
> index e5fcc79b5bfb..a5e75ec333ad 100644
> --- a/arch/powerpc/include/asm/page.h
> +++ b/arch/powerpc/include/asm/page.h
> @@ -330,6 +330,17 @@ static inline unsigned long kaslr_offset(void)
>  }
>
>  #include <asm-generic/memory_model.h>
> +
> +#if defined(CONFIG_ARCH_HAS_PKEYS)
> +# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
> +/* A protection key is a 5-bit value */
> +# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
> +# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
> +# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
> +# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
> +# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
> +#endif /* CONFIG_ARCH_HAS_PKEYS */
> +
>  #endif /* __ASSEMBLY__ */
>
>  #endif /* _ASM_POWERPC_PAGE_H */
> diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
> index d18e5c332cb9..b770db1a21e7 100644
> --- a/arch/x86/include/asm/page.h
> +++ b/arch/x86/include/asm/page.h
> @@ -87,5 +87,15 @@ static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
>
>  #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
>
> +#if defined(CONFIG_ARCH_HAS_PKEYS)
> +# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
> +/* A protection key is a 4-bit value */
> +# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
> +# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
> +# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
> +# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
> +# define VM_PKEY_BIT4	0
> +#endif /* CONFIG_ARCH_HAS_PKEYS */

Rather than moving these to arch code, we could instead keep the
generic definitions with some additional CONFIG_ARCH_HAS_PKEYS_{3,4,5}BIT
selected from the arch code.
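A rough sketch of that alternative, keeping the definitions in
include/linux/mm.h. The CONFIG_ARCH_HAS_PKEYS_{4,5}BIT symbols below are
hypothetical names taken from the suggestion, to be selected from the arch
Kconfig; they are not options that exist today:

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
/* Three pkey bits are the common minimum (arm64) */
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# if defined(CONFIG_ARCH_HAS_PKEYS_4BIT) || defined(CONFIG_ARCH_HAS_PKEYS_5BIT)
#  define VM_PKEY_BIT3	VM_HIGH_ARCH_3	/* x86, powerpc */
# else
#  define VM_PKEY_BIT3	0
# endif
# ifdef CONFIG_ARCH_HAS_PKEYS_5BIT
#  define VM_PKEY_BIT4	VM_HIGH_ARCH_4	/* powerpc only */
# else
#  define VM_PKEY_BIT4	0
# endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

x86 would then "select ARCH_HAS_PKEYS_4BIT" and powerpc
"select ARCH_HAS_PKEYS_5BIT" alongside ARCH_HAS_PKEYS, and the
arch/*/include/asm/page.h additions in this patch would not be needed.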
Define the VM_PKEY_BIT* values for arm64, and convert them into the
arm64 specific pgprot values.

Move the current values for x86 and PPC into arch/*.

Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/mman.h   |  8 +++++++-
 arch/arm64/include/asm/page.h   | 10 ++++++++++
 arch/arm64/mm/mmap.c            |  9 +++++++++
 arch/powerpc/include/asm/page.h | 11 +++++++++++
 arch/x86/include/asm/page.h     | 10 ++++++++++
 fs/proc/task_mmu.c              |  2 ++
 include/linux/mm.h              | 13 -------------
 7 files changed, 49 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a6154..ecb2d18dc4d7 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -7,7 +7,7 @@
 #include <uapi/asm/mman.h>
 
 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
-	unsigned long pkey __always_unused)
+	unsigned long pkey)
 {
 	unsigned long ret = 0;
 
@@ -17,6 +17,12 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 	if (system_supports_mte() && (prot & PROT_MTE))
 		ret |= VM_MTE;
 
+#if defined(CONFIG_ARCH_HAS_PKEYS)
+	ret |= pkey & 0x1 ? VM_PKEY_BIT0 : 0;
+	ret |= pkey & 0x2 ? VM_PKEY_BIT1 : 0;
+	ret |= pkey & 0x4 ? VM_PKEY_BIT2 : 0;
+#endif
+
 	return ret;
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 2312e6ee595f..aabfda2516d2 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -49,6 +49,16 @@ int pfn_is_map_memory(unsigned long pfn);
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
 
+#if defined(CONFIG_ARCH_HAS_PKEYS)
+/* A protection key is a 3-bit value */
+# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_2
+# define VM_PKEY_BIT0	VM_HIGH_ARCH_2
+# define VM_PKEY_BIT1	VM_HIGH_ARCH_3
+# define VM_PKEY_BIT2	VM_HIGH_ARCH_4
+# define VM_PKEY_BIT3	0
+# define VM_PKEY_BIT4	0
+#endif
+
 #include <asm-generic/getorder.h>
 
 #endif
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 645fe60d000f..2e2a5a9bcfa1 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -98,6 +98,15 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 	if (vm_flags & VM_MTE)
 		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
 
+#ifdef CONFIG_ARCH_HAS_PKEYS
+	if (vm_flags & VM_PKEY_BIT0)
+		prot |= PTE_PO_IDX_0;
+	if (vm_flags & VM_PKEY_BIT1)
+		prot |= PTE_PO_IDX_1;
+	if (vm_flags & VM_PKEY_BIT2)
+		prot |= PTE_PO_IDX_2;
+#endif
+
 	return __pgprot(prot);
 }
 EXPORT_SYMBOL(vm_get_page_prot);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index e5fcc79b5bfb..a5e75ec333ad 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -330,6 +330,17 @@ static inline unsigned long kaslr_offset(void)
 }
 
 #include <asm-generic/memory_model.h>
+
+#if defined(CONFIG_ARCH_HAS_PKEYS)
+# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
+/* A protection key is a 5-bit value */
+# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
+# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
+# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
+#endif /* CONFIG_ARCH_HAS_PKEYS */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index d18e5c332cb9..b770db1a21e7 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -87,5 +87,15 @@ static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
 
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
+#if defined(CONFIG_ARCH_HAS_PKEYS)
+# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
+/* A protection key is a 4-bit value */
+# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
+# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
+# define VM_PKEY_BIT4	0
+#endif /* CONFIG_ARCH_HAS_PKEYS */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_PAGE_H */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ef2eb12906da..8c2790abeffb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -691,7 +691,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 		[ilog2(VM_PKEY_BIT0)]	= "",
 		[ilog2(VM_PKEY_BIT1)]	= "",
 		[ilog2(VM_PKEY_BIT2)]	= "",
+#if VM_PKEY_BIT3
 		[ilog2(VM_PKEY_BIT3)]	= "",
+#endif
 #if VM_PKEY_BIT4
 		[ilog2(VM_PKEY_BIT4)]	= "",
 #endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 418d26608ece..47f42d9893fe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -328,19 +328,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
 #endif	/* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 
-#ifdef CONFIG_ARCH_HAS_PKEYS
-# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
-# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64   */
-# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
-# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
-#ifdef CONFIG_PPC
-# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
-#else
-# define VM_PKEY_BIT4	0
-#endif
-#endif /* CONFIG_ARCH_HAS_PKEYS */
-
 #ifdef CONFIG_X86_USER_SHADOW_STACK
 /*
  * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
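For context, the runtime path these flag definitions serve is roughly:
pkey_mprotect() hands the key to do_mprotect_pkey(), calc_vm_prot_bits() and
arch_calc_vm_prot_bits() encode it into the VM_PKEY_BIT* vma flags, and
vm_get_page_prot() translates those into the PTE_PO_IDX_* bits above. A
minimal userspace sketch of exercising that path, assuming a kernel with this
series applied and glibc's pkey wrappers:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Allocate a key; fails (e.g. ENOSPC) without pkey support. */
	int pkey = pkey_alloc(0, 0);
	if (pkey < 0) {
		perror("pkey_alloc");
		return 1;
	}

	/* Tag the mapping; the kernel records the key in the vma flags. */
	if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return 1;
	}

	/* On pkey-capable kernels, /proc/self/smaps then reports a
	 * "ProtectionKey:" line for this mapping. */
	printf("mapping %p tagged with pkey %d\n", p, pkey);
	return 0;
}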