Message ID | 1643029028-12710-10-git-send-email-anshuman.khandual@arm.com (mailing list archive) |
---|---|
State | New |
Series | mm/mmap: Drop protection_map[] and platform's __SXXX/__PXXX requirements |
On Mon, Jan 24, 2022 at 06:26:46PM +0530, Anshuman Khandual wrote:
> This defines and exports a platform-specific custom vm_get_page_prot() by
> subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
> __PXXX macros, which are no longer needed, can be dropped.

What is the fundamental advantage of this approach?

> [snip -- the quoted patch body is identical to the full diff shown below]
On 1/24/22 10:36 PM, Russell King (Oracle) wrote:
> On Mon, Jan 24, 2022 at 06:26:46PM +0530, Anshuman Khandual wrote:
>> This defines and exports a platform-specific custom vm_get_page_prot() by
>> subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
>> __PXXX macros, which are no longer needed, can be dropped.
>
> What is the fundamental advantage of this approach?

It removes the multiple 'core MM <--> platform' abstraction layers used to
map a vm_flags access permission combination into a page protection value.
From the cover letter:

----------
Currently there are multiple layers of abstraction i.e __SXXX/__PXXX
macros, protection_map[], arch_vm_get_page_prot() and arch_filter_pgprot()
built between the platform and generic MM, finally defining
vm_get_page_prot(). Hence this series proposes to drop all these
abstraction levels and instead just move the responsibility of defining
vm_get_page_prot() to the platform itself, making it clean and simple.
----------
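[Editor's note: for readers unfamiliar with the layers named above, here is
a rough sketch of the pre-series generic fallback, paraphrased from
mm/mmap.c of that era (not verbatim; exact details vary by kernel version).
The __PXXX/__SXXX macros populate protection_map[], which
vm_get_page_prot() indexes, with two further per-arch hooks layered on
top:

/* Sketch of the generic code this series collapses (simplified). */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));

	return arch_filter_pgprot(ret);
}
EXPORT_SYMBOL(vm_get_page_prot);

With ARCH_HAS_VM_GET_PAGE_PROT selected, this generic definition is
compiled out and the architecture's own version takes over. The ARM patch
below accordingly replaces the table lookup with an explicit switch,
folding the user_pgprot adjustments -- previously OR-ed into
protection_map[] at boot -- directly into each returned value.]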
This defines and exports a platform-specific custom vm_get_page_prot() by
subscribing to ARCH_HAS_VM_GET_PAGE_PROT. Subsequently all the __SXXX and
__PXXX macros, which are no longer needed, can be dropped.

Cc: Russell King <linux@armlinux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm/Kconfig               |  1 +
 arch/arm/include/asm/pgtable.h | 18 ------------
 arch/arm/mm/mmu.c              | 50 ++++++++++++++++++++++++++++++----
 3 files changed, 45 insertions(+), 24 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fabe39169b12..c12362d20c44 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
 	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
 	select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index cd1f84bb40ae..ec062dd6082a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -137,24 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  *  2) If we could do execute protection, then read is implied
  *  3) write implies read permissions
  */
-#define __P000	__PAGE_NONE
-#define __P001	__PAGE_READONLY
-#define __P010	__PAGE_COPY
-#define __P011	__PAGE_COPY
-#define __P100	__PAGE_READONLY_EXEC
-#define __P101	__PAGE_READONLY_EXEC
-#define __P110	__PAGE_COPY_EXEC
-#define __P111	__PAGE_COPY_EXEC
-
-#define __S000	__PAGE_NONE
-#define __S001	__PAGE_READONLY
-#define __S010	__PAGE_SHARED
-#define __S011	__PAGE_SHARED
-#define __S100	__PAGE_READONLY_EXEC
-#define __S101	__PAGE_READONLY_EXEC
-#define __S110	__PAGE_SHARED_EXEC
-#define __S111	__PAGE_SHARED_EXEC
-
 #ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 274e4f73fd33..3007d07bc0e7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -403,6 +403,8 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 		local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 }
 
+static pteval_t user_pgprot;
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -410,7 +412,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -627,11 +629,6 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= PTE_EXT_PXN;
 #endif
 
-	for (i = 0; i < 16; i++) {
-		pteval_t v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | user_pgprot);
-	}
-
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
@@ -670,6 +667,47 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+	case VM_NONE:
+		return __pgprot(pgprot_val(__PAGE_NONE) | user_pgprot);
+	case VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY) | user_pgprot);
+	case VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY) | user_pgprot);
+	case VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY) | user_pgprot);
+	case VM_EXEC:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_EXEC | VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_EXEC | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY_EXEC) | user_pgprot);
+	case VM_EXEC | VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_COPY_EXEC) | user_pgprot);
+	case VM_SHARED:
+		return __pgprot(pgprot_val(__PAGE_NONE) | user_pgprot);
+	case VM_SHARED | VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY) | user_pgprot);
+	case VM_SHARED | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED) | user_pgprot);
+	case VM_SHARED | VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED) | user_pgprot);
+	case VM_SHARED | VM_EXEC:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_SHARED | VM_EXEC | VM_READ:
+		return __pgprot(pgprot_val(__PAGE_READONLY_EXEC) | user_pgprot);
+	case VM_SHARED | VM_EXEC | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED_EXEC) | user_pgprot);
+	case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+		return __pgprot(pgprot_val(__PAGE_SHARED_EXEC) | user_pgprot);
+	default:
+		BUILD_BUG();
+	}
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
-- 
2.25.1
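[Editor's note: as a worked example of the switch in the patch, assuming
the usual meaning of ARM's __PAGE_* definitions: a private writable
mapping deliberately resolves to __PAGE_COPY, whose PTE is not
hardware-writable, so the first write faults and triggers copy-on-write;
only the VM_SHARED variant gets the genuinely writable __PAGE_SHARED:

/* Illustration only: two common cases resolved by the switch. */
pgprot_t priv_rw   = vm_get_page_prot(VM_READ | VM_WRITE);
		/* __PAGE_COPY | user_pgprot: read-only PTE, CoW on write */
pgprot_t shared_rw = vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE);
		/* __PAGE_SHARED | user_pgprot: writable PTE */

]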