[V3,1/2] mm/mmap: Restrict generic protection_map[] array visibility

Message ID 20220616040924.1022607-2-anshuman.khandual@arm.com (mailing list archive)
State New
Series mm/mmap: Drop __SXXX/__PXXX macros from across platforms

Commit Message

Anshuman Khandual June 16, 2022, 4:09 a.m. UTC
Restrict the generic protection_map[] array's visibility to platforms which
do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that define their own
vm_get_page_prot() by enabling ARCH_HAS_VM_GET_PAGE_PROT can keep a private
static protection_map[] that still implements an array lookup. Such private
protection_map[] arrays can do without the __PXXX/__SXXX macros, making the
macros redundant there, so drop them as well.

But platforms which do not define their own vm_get_page_prot() by enabling
ARCH_HAS_VM_GET_PAGE_PROT will still have to provide the __PXXX/__SXXX macros.
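
The resulting arrangement in mm/mmap.c looks roughly like this (a
simplified sketch for illustration; see the diff below for the actual
change):

/* Generic table and lookup, now only built for platforms that do
 * not select ARCH_HAS_VM_GET_PAGE_PROT.
 */
#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]	= __P000,
	[VM_READ]	= __P001,
	/* ... remaining __PXXX/__SXXX entries ... */
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */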

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
 arch/arm64/mm/mmap.c                  | 21 +++++++++++++++++++++
 arch/powerpc/include/asm/pgtable.h    |  2 ++
 arch/powerpc/mm/book3s64/pgtable.c    | 20 ++++++++++++++++++++
 arch/sparc/include/asm/pgtable_64.h   | 19 -------------------
 arch/sparc/mm/init_64.c               |  3 +++
 arch/x86/include/asm/pgtable_types.h  | 19 -------------------
 arch/x86/mm/pgprot.c                  | 19 +++++++++++++++++++
 include/linux/mm.h                    |  2 ++
 mm/mmap.c                             |  2 +-
 10 files changed, 68 insertions(+), 57 deletions(-)

Comments

Christophe Leroy June 16, 2022, 5:35 a.m. UTC | #1
On 16/06/2022 at 06:09, Anshuman Khandual wrote:
> Restrict the generic protection_map[] array's visibility to platforms which
> do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that define their own
> vm_get_page_prot() by enabling ARCH_HAS_VM_GET_PAGE_PROT can keep a private
> static protection_map[] that still implements an array lookup. Such private
> protection_map[] arrays can do without the __PXXX/__SXXX macros, making the
> macros redundant there, so drop them as well.
> 
> But platforms which do not define their own vm_get_page_prot() by enabling
> ARCH_HAS_VM_GET_PAGE_PROT will still have to provide the __PXXX/__SXXX macros.
> 
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: linux-mm@kvack.org
> Cc: linux-kernel@vger.kernel.org
> Acked-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>   arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
>   arch/arm64/mm/mmap.c                  | 21 +++++++++++++++++++++
>   arch/powerpc/include/asm/pgtable.h    |  2 ++
>   arch/powerpc/mm/book3s64/pgtable.c    | 20 ++++++++++++++++++++
>   arch/sparc/include/asm/pgtable_64.h   | 19 -------------------
>   arch/sparc/mm/init_64.c               |  3 +++
>   arch/x86/include/asm/pgtable_types.h  | 19 -------------------
>   arch/x86/mm/pgprot.c                  | 19 +++++++++++++++++++
>   include/linux/mm.h                    |  2 ++
>   mm/mmap.c                             |  2 +-
>   10 files changed, 68 insertions(+), 57 deletions(-)
> 

> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index d564d0ecd4cd..8ed2a80c896e 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -21,6 +21,7 @@ struct mm_struct;
>   #endif /* !CONFIG_PPC_BOOK3S */
>   
>   /* Note due to the way vm flags are laid out, the bits are XWR */
> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT

This ifdef is not necessary for now; it doesn't matter if __P000 etc.
still exist though not used.

>   #define __P000	PAGE_NONE
>   #define __P001	PAGE_READONLY
>   #define __P010	PAGE_COPY
> @@ -38,6 +39,7 @@ struct mm_struct;
>   #define __S101	PAGE_READONLY_X
>   #define __S110	PAGE_SHARED_X
>   #define __S111	PAGE_SHARED_X
> +#endif
>   
>   #ifndef __ASSEMBLY__
>   
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 7b9966402b25..d3b019b95c1d 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -551,6 +551,26 @@ unsigned long memremap_compat_align(void)
>   EXPORT_SYMBOL_GPL(memremap_compat_align);
>   #endif
>   
> +/* Note due to the way vm flags are laid out, the bits are XWR */
> +static const pgprot_t protection_map[16] = {
> +	[VM_NONE]					= PAGE_NONE,
> +	[VM_READ]					= PAGE_READONLY,
> +	[VM_WRITE]					= PAGE_COPY,
> +	[VM_WRITE | VM_READ]				= PAGE_COPY,
> +	[VM_EXEC]					= PAGE_READONLY_X,
> +	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
> +	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
> +	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
> +	[VM_SHARED]					= PAGE_NONE,
> +	[VM_SHARED | VM_READ]				= PAGE_READONLY,
> +	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
> +	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
> +	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
> +	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
> +	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
> +	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
> +};
> +

There is not much point in first adding that here and then moving it
elsewhere in the second patch.

I think with my suggestion to use #ifdef __P000 as a guard, the powerpc 
changes could go in a single patch.

>   pgprot_t vm_get_page_prot(unsigned long vm_flags)
>   {
>   	unsigned long prot = pgprot_val(protection_map[vm_flags &
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 61e6135c54ef..e66920414945 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -101,6 +101,7 @@ static void unmap_region(struct mm_struct *mm,
>    *								w: (no) no
>    *								x: (yes) yes
>    */
> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT

You should use #ifdef __P000 instead; that way you could migrate
architectures one by one.

>   pgprot_t protection_map[16] __ro_after_init = {
>   	[VM_NONE]					= __P000,
>   	[VM_READ]					= __P001,
> @@ -120,7 +121,6 @@ pgprot_t protection_map[16] __ro_after_init = {
>   	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __S111
>   };
>   
> -#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>   pgprot_t vm_get_page_prot(unsigned long vm_flags)
>   {
>   	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
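
For reference, guarding on the macro rather than the Kconfig symbol
would look roughly like this (a sketch, assuming every not-yet-converted
architecture still defines __P000):

/* mm/mmap.c: an architecture is migrated simply by dropping its
 * __PXXX/__SXXX definitions; no Kconfig change is needed until the end.
 */
#ifdef __P000
pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= __P000,
	[VM_READ]					= __P001,
	/* ... */
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __S111
};
#endif /* __P000 */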
kernel test robot June 16, 2022, 12:44 p.m. UTC | #2
Hi Anshuman,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Anshuman-Khandual/mm-mmap-Drop-__SXXX-__PXXX-macros-from-across-platforms/20220616-121132
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20220616/202206162004.ak9KTfMD-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-3) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/4eb89368b130fe235d5e587bcc2eec18bb688e2d
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Anshuman-Khandual/mm-mmap-Drop-__SXXX-__PXXX-macros-from-across-platforms/20220616-121132
        git checkout 4eb89368b130fe235d5e587bcc2eec18bb688e2d
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash arch/x86/

If you fix the issue, kindly add following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from arch/x86/include/asm/percpu.h:27,
                    from arch/x86/include/asm/preempt.h:6,
                    from include/linux/preempt.h:78,
                    from include/linux/spinlock.h:55,
                    from include/linux/mmzone.h:8,
                    from include/linux/gfp.h:6,
                    from include/linux/mm.h:7,
                    from arch/x86/mm/mem_encrypt_amd.c:14:
   arch/x86/mm/mem_encrypt_amd.c: In function 'sme_early_init':
>> arch/x86/mm/mem_encrypt_amd.c:499:36: error: 'protection_map' undeclared (first use in this function)
     499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
         |                                    ^~~~~~~~~~~~~~
   include/linux/kernel.h:55:33: note: in definition of macro 'ARRAY_SIZE'
      55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
         |                                 ^~~
   arch/x86/mm/mem_encrypt_amd.c:499:36: note: each undeclared identifier is reported only once for each function it appears in
     499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
         |                                    ^~~~~~~~~~~~~~
   include/linux/kernel.h:55:33: note: in definition of macro 'ARRAY_SIZE'
      55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
         |                                 ^~~
   In file included from include/linux/bits.h:22,
                    from include/linux/ratelimit_types.h:5,
                    from include/linux/printk.h:9,
                    from include/asm-generic/bug.h:22,
                    from arch/x86/include/asm/bug.h:87,
                    from include/linux/bug.h:5,
                    from include/linux/mmdebug.h:5,
                    from include/linux/mm.h:6,
                    from arch/x86/mm/mem_encrypt_amd.c:14:
   include/linux/build_bug.h:16:51: error: bit-field '<anonymous>' width not an integer constant
      16 | #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
         |                                                   ^
   include/linux/compiler.h:240:33: note: in expansion of macro 'BUILD_BUG_ON_ZERO'
     240 | #define __must_be_array(a)      BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
         |                                 ^~~~~~~~~~~~~~~~~
   include/linux/kernel.h:55:59: note: in expansion of macro '__must_be_array'
      55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
         |                                                           ^~~~~~~~~~~~~~~
   arch/x86/mm/mem_encrypt_amd.c:499:25: note: in expansion of macro 'ARRAY_SIZE'
     499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
         |                         ^~~~~~~~~~


vim +/protection_map +499 arch/x86/mm/mem_encrypt_amd.c

1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  486  
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  487  void __init sme_early_init(void)
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  488  {
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  489  	unsigned int i;
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  490  
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  491  	if (!sme_me_mask)
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  492  		return;
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  493  
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  494  	early_pmd_flags = __sme_set(early_pmd_flags);
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  495  
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  496  	__supported_pte_mask = __sme_set(__supported_pte_mask);
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  497  
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  498  	/* Update the protection map with memory encryption mask */
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22 @499  	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  500  		protection_map[i] = pgprot_encrypted(protection_map[i]);
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  501  
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  502  	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  503  	x86_platform.guest.enc_status_change_finish  = amd_enc_status_change_finish;
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  504  	x86_platform.guest.enc_tlb_flush_required    = amd_enc_tlb_flush_required;
1e8c5971c24989 arch/x86/mm/mem_encrypt_amd.c Brijesh Singh 2022-02-22  505  	x86_platform.guest.enc_cache_flush_required  = amd_enc_cache_flush_required;
f4495615d76cfe arch/x86/mm/mem_encrypt.c     Ashish Kalra  2021-08-24  506  }
f4495615d76cfe arch/x86/mm/mem_encrypt.c     Ashish Kalra  2021-08-24  507
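
The root cause is symbol visibility: the patch makes the x86
protection_map[] static inside arch/x86/mm/pgprot.c, while
mem_encrypt_amd.c still references it directly. In outline:

/* arch/x86/mm/pgprot.c (after this patch) */
static pgprot_t protection_map[16] __ro_after_init = { /* ... */ };

/* arch/x86/mm/mem_encrypt_amd.c can no longer see the symbol: */
for (i = 0; i < ARRAY_SIZE(protection_map); i++)	/* build error */
	protection_map[i] = pgprot_encrypted(protection_map[i]);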
Anshuman Khandual June 20, 2022, 4:45 a.m. UTC | #3
On 6/16/22 18:14, kernel test robot wrote:
> Hi Anshuman,
> 
> Thank you for the patch! Yet something to improve:
> 
> [auto build test ERROR on akpm-mm/mm-everything]
> 
> url:    https://github.com/intel-lab-lkp/linux/commits/Anshuman-Khandual/mm-mmap-Drop-__SXXX-__PXXX-macros-from-across-platforms/20220616-121132
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
> config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20220616/202206162004.ak9KTfMD-lkp@intel.com/config)
> compiler: gcc-11 (Debian 11.3.0-3) 11.3.0
> reproduce (this is a W=1 build):
>         # https://github.com/intel-lab-lkp/linux/commit/4eb89368b130fe235d5e587bcc2eec18bb688e2d
>         git remote add linux-review https://github.com/intel-lab-lkp/linux
>         git fetch --no-tags linux-review Anshuman-Khandual/mm-mmap-Drop-__SXXX-__PXXX-macros-from-across-platforms/20220616-121132
>         git checkout 4eb89368b130fe235d5e587bcc2eec18bb688e2d
>         # save the config file
>         mkdir build_dir && cp config build_dir/.config
>         make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash arch/x86/
> 
> If you fix the issue, kindly add following tag where applicable
> Reported-by: kernel test robot <lkp@intel.com>
> 
> All errors (new ones prefixed by >>):
> 
>    In file included from arch/x86/include/asm/percpu.h:27,
>                     from arch/x86/include/asm/preempt.h:6,
>                     from include/linux/preempt.h:78,
>                     from include/linux/spinlock.h:55,
>                     from include/linux/mmzone.h:8,
>                     from include/linux/gfp.h:6,
>                     from include/linux/mm.h:7,
>                     from arch/x86/mm/mem_encrypt_amd.c:14:
>    arch/x86/mm/mem_encrypt_amd.c: In function 'sme_early_init':
>>> arch/x86/mm/mem_encrypt_amd.c:499:36: error: 'protection_map' undeclared (first use in this function)
>      499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>          |                                    ^~~~~~~~~~~~~~
>    include/linux/kernel.h:55:33: note: in definition of macro 'ARRAY_SIZE'
>       55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
>          |                                 ^~~
>    arch/x86/mm/mem_encrypt_amd.c:499:36: note: each undeclared identifier is reported only once for each function it appears in
>      499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>          |                                    ^~~~~~~~~~~~~~
>    include/linux/kernel.h:55:33: note: in definition of macro 'ARRAY_SIZE'
>       55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
>          |                                 ^~~
>    In file included from include/linux/bits.h:22,
>                     from include/linux/ratelimit_types.h:5,
>                     from include/linux/printk.h:9,
>                     from include/asm-generic/bug.h:22,
>                     from arch/x86/include/asm/bug.h:87,
>                     from include/linux/bug.h:5,
>                     from include/linux/mmdebug.h:5,
>                     from include/linux/mm.h:6,
>                     from arch/x86/mm/mem_encrypt_amd.c:14:
>    include/linux/build_bug.h:16:51: error: bit-field '<anonymous>' width not an integer constant
>       16 | #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
>          |                                                   ^
>    include/linux/compiler.h:240:33: note: in expansion of macro 'BUILD_BUG_ON_ZERO'
>      240 | #define __must_be_array(a)      BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
>          |                                 ^~~~~~~~~~~~~~~~~
>    include/linux/kernel.h:55:59: note: in expansion of macro '__must_be_array'
>       55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
>          |                                                           ^~~~~~~~~~~~~~~
>    arch/x86/mm/mem_encrypt_amd.c:499:25: note: in expansion of macro 'ARRAY_SIZE'
>      499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>          |                         ^~~~~~~~~~

This patch fixes the build failure here.

diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index f6d038e2cd8e..d0c2ec1bb659 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -484,6 +484,8 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo
        enc_dec_hypercall(vaddr, npages, enc);
 }
 
+extern pgprot_t protection_map[16];
+
 void __init sme_early_init(void)
 {
        unsigned int i;
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
index 7eca1b009af6..96eca0b2ec90 100644
--- a/arch/x86/mm/pgprot.c
+++ b/arch/x86/mm/pgprot.c
@@ -4,7 +4,7 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
-static pgprot_t protection_map[16] __ro_after_init = {
+pgprot_t protection_map[16] __ro_after_init = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_COPY,
Anshuman Khandual June 20, 2022, 5:16 a.m. UTC | #4
On 6/16/22 11:05, Christophe Leroy wrote:
> 
> On 16/06/2022 at 06:09, Anshuman Khandual wrote:
>> Restrict the generic protection_map[] array's visibility to platforms which
>> do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that define their own
>> vm_get_page_prot() by enabling ARCH_HAS_VM_GET_PAGE_PROT can keep a private
>> static protection_map[] that still implements an array lookup. Such private
>> protection_map[] arrays can do without the __PXXX/__SXXX macros, making the
>> macros redundant there, so drop them as well.
>>
>> But platforms which do not define their own vm_get_page_prot() by enabling
>> ARCH_HAS_VM_GET_PAGE_PROT will still have to provide the __PXXX/__SXXX macros.
>>
>> Cc: Andrew Morton <akpm@linux-foundation.org>
>> Cc: linux-mm@kvack.org
>> Cc: linux-kernel@vger.kernel.org
>> Acked-by: Christoph Hellwig <hch@lst.de>
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>   arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
>>   arch/arm64/mm/mmap.c                  | 21 +++++++++++++++++++++
>>   arch/powerpc/include/asm/pgtable.h    |  2 ++
>>   arch/powerpc/mm/book3s64/pgtable.c    | 20 ++++++++++++++++++++
>>   arch/sparc/include/asm/pgtable_64.h   | 19 -------------------
>>   arch/sparc/mm/init_64.c               |  3 +++
>>   arch/x86/include/asm/pgtable_types.h  | 19 -------------------
>>   arch/x86/mm/pgprot.c                  | 19 +++++++++++++++++++
>>   include/linux/mm.h                    |  2 ++
>>   mm/mmap.c                             |  2 +-
>>   10 files changed, 68 insertions(+), 57 deletions(-)
>>
>> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
>> index d564d0ecd4cd..8ed2a80c896e 100644
>> --- a/arch/powerpc/include/asm/pgtable.h
>> +++ b/arch/powerpc/include/asm/pgtable.h
>> @@ -21,6 +21,7 @@ struct mm_struct;
>>   #endif /* !CONFIG_PPC_BOOK3S */
>>   
>>   /* Note due to the way vm flags are laid out, the bits are XWR */
>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
> This ifdef is not necessary for now; it doesn't matter if __P000 etc.
> still exist though not used.
> 
>>   #define __P000	PAGE_NONE
>>   #define __P001	PAGE_READONLY
>>   #define __P010	PAGE_COPY
>> @@ -38,6 +39,7 @@ struct mm_struct;
>>   #define __S101	PAGE_READONLY_X
>>   #define __S110	PAGE_SHARED_X
>>   #define __S111	PAGE_SHARED_X
>> +#endif
>>   
>>   #ifndef __ASSEMBLY__
>>   
>> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
>> index 7b9966402b25..d3b019b95c1d 100644
>> --- a/arch/powerpc/mm/book3s64/pgtable.c
>> +++ b/arch/powerpc/mm/book3s64/pgtable.c
>> @@ -551,6 +551,26 @@ unsigned long memremap_compat_align(void)
>>   EXPORT_SYMBOL_GPL(memremap_compat_align);
>>   #endif
>>   
>> +/* Note due to the way vm flags are laid out, the bits are XWR */
>> +static const pgprot_t protection_map[16] = {
>> +	[VM_NONE]					= PAGE_NONE,
>> +	[VM_READ]					= PAGE_READONLY,
>> +	[VM_WRITE]					= PAGE_COPY,
>> +	[VM_WRITE | VM_READ]				= PAGE_COPY,
>> +	[VM_EXEC]					= PAGE_READONLY_X,
>> +	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
>> +	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
>> +	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
>> +	[VM_SHARED]					= PAGE_NONE,
>> +	[VM_SHARED | VM_READ]				= PAGE_READONLY,
>> +	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
>> +	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
>> +	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
>> +	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
>> +	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
>> +	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
>> +};
>> +
> There is not much point in first adding that here and then moving it
> elsewhere in the second patch.
> 
> I think with my suggestion to use #ifdef __P000 as a guard, the powerpc 
> changes could go in a single patch.
> 
>>   pgprot_t vm_get_page_prot(unsigned long vm_flags)
>>   {
>>   	unsigned long prot = pgprot_val(protection_map[vm_flags &
>> diff --git a/mm/mmap.c b/mm/mmap.c
>> index 61e6135c54ef..e66920414945 100644
>> --- a/mm/mmap.c
>> +++ b/mm/mmap.c
>> @@ -101,6 +101,7 @@ static void unmap_region(struct mm_struct *mm,
>>    *								w: (no) no
>>    *								x: (yes) yes
>>    */
>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
> You should use #ifdef __P000 instead; that way you could migrate
> architectures one by one.

If vm_get_page_prot() gets moved into all platforms, what would be the
preferred method to organize this patch series?

1. Move protection_map[] inside platforms with ARCH_HAS_VM_GET_PAGE_PROT (current patch 1)
2. Convert the remaining platforms to use ARCH_HAS_VM_GET_PAGE_PROT one after the other
3. Drop ARCH_HAS_VM_GET_PAGE_PROT completely

Using "#ifdef __P000" to wrap protection_map[] will leave two different #ifdefs
in flight, i.e. __P000 and ARCH_HAS_VM_GET_PAGE_PROT, in the generic mmap code until
both get dropped eventually. But using "#ifdef __P000" does enable splitting the
first patch into multiple changes, one for each individual platform.
Christoph Hellwig June 20, 2022, 5:55 a.m. UTC | #5
On Mon, Jun 20, 2022 at 10:15:31AM +0530, Anshuman Khandual wrote:
> +extern pgprot_t protection_map[16];


externs in .c files are never a good idea.  I'd rather add a helper
function in pgprot.c that applies pgprot_encrypted to protection_map.
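
A minimal sketch of such a helper (the function and header names here
are illustrative assumptions, not necessarily what was merged):

/* arch/x86/mm/pgprot.c: keep the table static and export behaviour
 * rather than data.
 */
static pgprot_t protection_map[16] __ro_after_init = { /* ... */ };

void __init add_encrypt_protection_map(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
}

/* Declared in a header (e.g. asm/mem_encrypt.h) and called from
 * sme_early_init() in place of the open-coded loop there.
 */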
Christophe Leroy June 20, 2022, 6:41 a.m. UTC | #6
On 20/06/2022 at 07:16, Anshuman Khandual wrote:
> 
> 
> On 6/16/22 11:05, Christophe Leroy wrote:
>>
>> On 16/06/2022 at 06:09, Anshuman Khandual wrote:
>>> Restrict the generic protection_map[] array's visibility to platforms which
>>> do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that define their own
>>> vm_get_page_prot() by enabling ARCH_HAS_VM_GET_PAGE_PROT can keep a private
>>> static protection_map[] that still implements an array lookup. Such private
>>> protection_map[] arrays can do without the __PXXX/__SXXX macros, making the
>>> macros redundant there, so drop them as well.
>>>
>>> But platforms which do not define their own vm_get_page_prot() by enabling
>>> ARCH_HAS_VM_GET_PAGE_PROT will still have to provide the __PXXX/__SXXX macros.
>>>
>>> Cc: Andrew Morton <akpm@linux-foundation.org>
>>> Cc: linux-mm@kvack.org
>>> Cc: linux-kernel@vger.kernel.org
>>> Acked-by: Christoph Hellwig <hch@lst.de>
>>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>>> ---
>>>    arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
>>>    arch/arm64/mm/mmap.c                  | 21 +++++++++++++++++++++
>>>    arch/powerpc/include/asm/pgtable.h    |  2 ++
>>>    arch/powerpc/mm/book3s64/pgtable.c    | 20 ++++++++++++++++++++
>>>    arch/sparc/include/asm/pgtable_64.h   | 19 -------------------
>>>    arch/sparc/mm/init_64.c               |  3 +++
>>>    arch/x86/include/asm/pgtable_types.h  | 19 -------------------
>>>    arch/x86/mm/pgprot.c                  | 19 +++++++++++++++++++
>>>    include/linux/mm.h                    |  2 ++
>>>    mm/mmap.c                             |  2 +-
>>>    10 files changed, 68 insertions(+), 57 deletions(-)
>>>
>>> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
>>> index d564d0ecd4cd..8ed2a80c896e 100644
>>> --- a/arch/powerpc/include/asm/pgtable.h
>>> +++ b/arch/powerpc/include/asm/pgtable.h
>>> @@ -21,6 +21,7 @@ struct mm_struct;
>>>    #endif /* !CONFIG_PPC_BOOK3S */
>>>    
>>>    /* Note due to the way vm flags are laid out, the bits are XWR */
>>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>> This ifdef is not necessary for now; it doesn't matter if __P000 etc.
>> still exist though not used.
>>
>>>    #define __P000	PAGE_NONE
>>>    #define __P001	PAGE_READONLY
>>>    #define __P010	PAGE_COPY
>>> @@ -38,6 +39,7 @@ struct mm_struct;
>>>    #define __S101	PAGE_READONLY_X
>>>    #define __S110	PAGE_SHARED_X
>>>    #define __S111	PAGE_SHARED_X
>>> +#endif
>>>    
>>>    #ifndef __ASSEMBLY__
>>>    
>>> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
>>> index 7b9966402b25..d3b019b95c1d 100644
>>> --- a/arch/powerpc/mm/book3s64/pgtable.c
>>> +++ b/arch/powerpc/mm/book3s64/pgtable.c
>>> @@ -551,6 +551,26 @@ unsigned long memremap_compat_align(void)
>>>    EXPORT_SYMBOL_GPL(memremap_compat_align);
>>>    #endif
>>>    
>>> +/* Note due to the way vm flags are laid out, the bits are XWR */
>>> +static const pgprot_t protection_map[16] = {
>>> +	[VM_NONE]					= PAGE_NONE,
>>> +	[VM_READ]					= PAGE_READONLY,
>>> +	[VM_WRITE]					= PAGE_COPY,
>>> +	[VM_WRITE | VM_READ]				= PAGE_COPY,
>>> +	[VM_EXEC]					= PAGE_READONLY_X,
>>> +	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
>>> +	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
>>> +	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
>>> +	[VM_SHARED]					= PAGE_NONE,
>>> +	[VM_SHARED | VM_READ]				= PAGE_READONLY,
>>> +	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
>>> +	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
>>> +	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
>>> +	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
>>> +	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
>>> +	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
>>> +};
>>> +
>> There is not much point in first adding that here and then moving it
>> elsewhere in the second patch.
>>
>> I think with my suggestion to use #ifdef __P000 as a guard, the powerpc
>> changes could go in a single patch.
>>
>>>    pgprot_t vm_get_page_prot(unsigned long vm_flags)
>>>    {
>>>    	unsigned long prot = pgprot_val(protection_map[vm_flags &
>>> diff --git a/mm/mmap.c b/mm/mmap.c
>>> index 61e6135c54ef..e66920414945 100644
>>> --- a/mm/mmap.c
>>> +++ b/mm/mmap.c
>>> @@ -101,6 +101,7 @@ static void unmap_region(struct mm_struct *mm,
>>>     *								w: (no) no
>>>     *								x: (yes) yes
>>>     */
>>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>> You should use #ifdef __P000 instead; that way you could migrate
>> architectures one by one.
> 
> If vm_get_page_prot() gets moved into all platforms, wondering what would be
> the preferred method to organize this patch series ?
> 
> 1. Move protection_map[] inside platforms with ARCH_HAS_VM_PAGE_PROT (current patch 1)
> 2. Convert remaining platforms to use ARCH_HAS_VM_PAGE_PROT one after the other
> 3. Drop ARCH_HAS_VM_PAGE_PROT completely
> 
> Using "#ifdef __P000" for wrapping protection_map[] will leave two different #ifdefs
> in flight i.e __P000, ARCH_HAS_VM_PAGE_PROT in the generic mmap code, until both gets
> dropped eventually. But using "#ifdef __P000" does enable splitting the first patch
> into multiple changes for each individual platforms.

From previous discussions and based on Christoph's suggestion, I guess 
we now aim at getting vm_get_page_prot() moved into all platforms 
together with protection_map[]. Therefore the use of #ifdef __P000 could 
be very temporary at the beginning of the series:
1. Guard the generic protection_map[] with #ifdef __P000
2. Move protection_map[] into architecture code and drop __Pxxx/__Sxxx for arm64
3. Same for sparc
4. Same for x86
5. Convert all of powerpc to ARCH_HAS_VM_GET_PAGE_PROT and move 
protection_map[] into architecture code and drop __Pxxx/__Sxxx
6. Replace #ifdef __P000 by #ifdef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
7. Convert all remaining platforms to CONFIG_ARCH_HAS_VM_GET_PAGE_PROT one 
by one (but keep a protection_map[] table, don't use switch/case)
8. Drop ARCH_HAS_VM_GET_PAGE_PROT completely.

Eventually you can squash step 6 into step 8. A sketch of the resulting 
per-architecture end state follows below.
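
Concretely, after step 7 each converted architecture would carry
something like the following until the Kconfig symbol is finally
dropped in step 8 (a sketch; the page protection names and file
locations vary per architecture):

/* arch/<arch>/Kconfig */
	select ARCH_HAS_VM_GET_PAGE_PROT

/* arch/<arch>/mm/<file>.c */
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]	= PAGE_NONE,
	[VM_READ]	= PAGE_READONLY,
	/* ... */
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);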
Christophe Leroy June 20, 2022, 6:43 a.m. UTC | #7
On 20/06/2022 at 06:45, Anshuman Khandual wrote:
> 
> On 6/16/22 18:14, kernel test robot wrote:
>> Hi Anshuman,
>>
>> Thank you for the patch! Yet something to improve:
>>
>> [auto build test ERROR on akpm-mm/mm-everything]
>>
>> url:    https://github.com/intel-lab-lkp/linux/commits/Anshuman-Khandual/mm-mmap-Drop-__SXXX-__PXXX-macros-from-across-platforms/20220616-121132
>> base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
>> config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20220616/202206162004.ak9KTfMD-lkp@intel.com/config)
>> compiler: gcc-11 (Debian 11.3.0-3) 11.3.0
>> reproduce (this is a W=1 build):
>>          # https://github.com/intel-lab-lkp/linux/commit/4eb89368b130fe235d5e587bcc2eec18bb688e2d
>>          git remote add linux-review https://github.com/intel-lab-lkp/linux
>>          git fetch --no-tags linux-review Anshuman-Khandual/mm-mmap-Drop-__SXXX-__PXXX-macros-from-across-platforms/20220616-121132
>>          git checkout 4eb89368b130fe235d5e587bcc2eec18bb688e2d
>>          # save the config file
>>          mkdir build_dir && cp config build_dir/.config
>>          make W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash arch/x86/
>>
>> If you fix the issue, kindly add following tag where applicable
>> Reported-by: kernel test robot <lkp@intel.com>
>>
>> All errors (new ones prefixed by >>):
>>
>>     In file included from arch/x86/include/asm/percpu.h:27,
>>                      from arch/x86/include/asm/preempt.h:6,
>>                      from include/linux/preempt.h:78,
>>                      from include/linux/spinlock.h:55,
>>                      from include/linux/mmzone.h:8,
>>                      from include/linux/gfp.h:6,
>>                      from include/linux/mm.h:7,
>>                      from arch/x86/mm/mem_encrypt_amd.c:14:
>>     arch/x86/mm/mem_encrypt_amd.c: In function 'sme_early_init':
>>>> arch/x86/mm/mem_encrypt_amd.c:499:36: error: 'protection_map' undeclared (first use in this function)
>>       499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>>           |                                    ^~~~~~~~~~~~~~
>>     include/linux/kernel.h:55:33: note: in definition of macro 'ARRAY_SIZE'
>>        55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
>>           |                                 ^~~
>>     arch/x86/mm/mem_encrypt_amd.c:499:36: note: each undeclared identifier is reported only once for each function it appears in
>>       499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>>           |                                    ^~~~~~~~~~~~~~
>>     include/linux/kernel.h:55:33: note: in definition of macro 'ARRAY_SIZE'
>>        55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
>>           |                                 ^~~
>>     In file included from include/linux/bits.h:22,
>>                      from include/linux/ratelimit_types.h:5,
>>                      from include/linux/printk.h:9,
>>                      from include/asm-generic/bug.h:22,
>>                      from arch/x86/include/asm/bug.h:87,
>>                      from include/linux/bug.h:5,
>>                      from include/linux/mmdebug.h:5,
>>                      from include/linux/mm.h:6,
>>                      from arch/x86/mm/mem_encrypt_amd.c:14:
>>     include/linux/build_bug.h:16:51: error: bit-field '<anonymous>' width not an integer constant
>>        16 | #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
>>           |                                                   ^
>>     include/linux/compiler.h:240:33: note: in expansion of macro 'BUILD_BUG_ON_ZERO'
>>       240 | #define __must_be_array(a)      BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
>>           |                                 ^~~~~~~~~~~~~~~~~
>>     include/linux/kernel.h:55:59: note: in expansion of macro '__must_be_array'
>>        55 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
>>           |                                                           ^~~~~~~~~~~~~~~
>>     arch/x86/mm/mem_encrypt_amd.c:499:25: note: in expansion of macro 'ARRAY_SIZE'
>>       499 |         for (i = 0; i < ARRAY_SIZE(protection_map); i++)
>>           |                         ^~~~~~~~~~
> 
> This patch fixes the build failure here.
> 
> diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
> index f6d038e2cd8e..d0c2ec1bb659 100644
> --- a/arch/x86/mm/mem_encrypt_amd.c
> +++ b/arch/x86/mm/mem_encrypt_amd.c
> @@ -484,6 +484,8 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo
>          enc_dec_hypercall(vaddr, npages, enc);
>   }
>   
> +extern pgprot_t protection_map[16];

Adding an extern declaration in a C file is not the best solution. Isn't 
there a header with that declaration?

> +
>   void __init sme_early_init(void)
>   {
>          unsigned int i;
> diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
> index 7eca1b009af6..96eca0b2ec90 100644
> --- a/arch/x86/mm/pgprot.c
> +++ b/arch/x86/mm/pgprot.c
> @@ -4,7 +4,7 @@
>   #include <linux/mm.h>
>   #include <asm/pgtable.h>
>   
> -static pgprot_t protection_map[16] __ro_after_init = {
> +pgprot_t protection_map[16] __ro_after_init = {
>          [VM_NONE]                                       = PAGE_NONE,
>          [VM_READ]                                       = PAGE_READONLY,
>          [VM_WRITE]                                      = PAGE_COPY,
>
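
For completeness, the header-based alternative being asked about would
be a declaration next to the other page table definitions instead of an
extern in the .c file, e.g. (the exact header choice is an assumption):

/* arch/x86/include/asm/pgtable_types.h */
extern pgprot_t protection_map[16];

Though, as suggested in #5, a helper inside pgprot.c avoids exposing
the array at all.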
Anshuman Khandual June 21, 2022, 9:44 a.m. UTC | #8
On 6/20/22 12:11, Christophe Leroy wrote:
> 
> 
> On 20/06/2022 at 07:16, Anshuman Khandual wrote:
>>
>>
>> On 6/16/22 11:05, Christophe Leroy wrote:
>>>
>>> On 16/06/2022 at 06:09, Anshuman Khandual wrote:
>>>> Restrict the generic protection_map[] array's visibility to platforms which
>>>> do not enable ARCH_HAS_VM_GET_PAGE_PROT. Platforms that define their own
>>>> vm_get_page_prot() by enabling ARCH_HAS_VM_GET_PAGE_PROT can keep a private
>>>> static protection_map[] that still implements an array lookup. Such private
>>>> protection_map[] arrays can do without the __PXXX/__SXXX macros, making the
>>>> macros redundant there, so drop them as well.
>>>>
>>>> But platforms which do not define their own vm_get_page_prot() by enabling
>>>> ARCH_HAS_VM_GET_PAGE_PROT will still have to provide the __PXXX/__SXXX macros.
>>>>
>>>> Cc: Andrew Morton <akpm@linux-foundation.org>
>>>> Cc: linux-mm@kvack.org
>>>> Cc: linux-kernel@vger.kernel.org
>>>> Acked-by: Christoph Hellwig <hch@lst.de>
>>>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>>>> ---
>>>>    arch/arm64/include/asm/pgtable-prot.h | 18 ------------------
>>>>    arch/arm64/mm/mmap.c                  | 21 +++++++++++++++++++++
>>>>    arch/powerpc/include/asm/pgtable.h    |  2 ++
>>>>    arch/powerpc/mm/book3s64/pgtable.c    | 20 ++++++++++++++++++++
>>>>    arch/sparc/include/asm/pgtable_64.h   | 19 -------------------
>>>>    arch/sparc/mm/init_64.c               |  3 +++
>>>>    arch/x86/include/asm/pgtable_types.h  | 19 -------------------
>>>>    arch/x86/mm/pgprot.c                  | 19 +++++++++++++++++++
>>>>    include/linux/mm.h                    |  2 ++
>>>>    mm/mmap.c                             |  2 +-
>>>>    10 files changed, 68 insertions(+), 57 deletions(-)
>>>>
>>>> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
>>>> index d564d0ecd4cd..8ed2a80c896e 100644
>>>> --- a/arch/powerpc/include/asm/pgtable.h
>>>> +++ b/arch/powerpc/include/asm/pgtable.h
>>>> @@ -21,6 +21,7 @@ struct mm_struct;
>>>>    #endif /* !CONFIG_PPC_BOOK3S */
>>>>    
>>>>    /* Note due to the way vm flags are laid out, the bits are XWR */
>>>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>>> This ifdef is not necessary for now; it doesn't matter if __P000 etc.
>>> still exist though not used.
>>>
>>>>    #define __P000	PAGE_NONE
>>>>    #define __P001	PAGE_READONLY
>>>>    #define __P010	PAGE_COPY
>>>> @@ -38,6 +39,7 @@ struct mm_struct;
>>>>    #define __S101	PAGE_READONLY_X
>>>>    #define __S110	PAGE_SHARED_X
>>>>    #define __S111	PAGE_SHARED_X
>>>> +#endif
>>>>    
>>>>    #ifndef __ASSEMBLY__
>>>>    
>>>> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
>>>> index 7b9966402b25..d3b019b95c1d 100644
>>>> --- a/arch/powerpc/mm/book3s64/pgtable.c
>>>> +++ b/arch/powerpc/mm/book3s64/pgtable.c
>>>> @@ -551,6 +551,26 @@ unsigned long memremap_compat_align(void)
>>>>    EXPORT_SYMBOL_GPL(memremap_compat_align);
>>>>    #endif
>>>>    
>>>> +/* Note due to the way vm flags are laid out, the bits are XWR */
>>>> +static const pgprot_t protection_map[16] = {
>>>> +	[VM_NONE]					= PAGE_NONE,
>>>> +	[VM_READ]					= PAGE_READONLY,
>>>> +	[VM_WRITE]					= PAGE_COPY,
>>>> +	[VM_WRITE | VM_READ]				= PAGE_COPY,
>>>> +	[VM_EXEC]					= PAGE_READONLY_X,
>>>> +	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
>>>> +	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
>>>> +	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
>>>> +	[VM_SHARED]					= PAGE_NONE,
>>>> +	[VM_SHARED | VM_READ]				= PAGE_READONLY,
>>>> +	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
>>>> +	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
>>>> +	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
>>>> +	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
>>>> +	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
>>>> +	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
>>>> +};
>>>> +
>>> There is not much point in first adding that here and then moving it
>>> elsewhere in the second patch.
>>>
>>> I think with my suggestion to use #ifdef __P000 as a guard, the powerpc
>>> changes could go in a single patch.
>>>
>>>>    pgprot_t vm_get_page_prot(unsigned long vm_flags)
>>>>    {
>>>>    	unsigned long prot = pgprot_val(protection_map[vm_flags &
>>>> diff --git a/mm/mmap.c b/mm/mmap.c
>>>> index 61e6135c54ef..e66920414945 100644
>>>> --- a/mm/mmap.c
>>>> +++ b/mm/mmap.c
>>>> @@ -101,6 +101,7 @@ static void unmap_region(struct mm_struct *mm,
>>>>     *								w: (no) no
>>>>     *								x: (yes) yes
>>>>     */
>>>> +#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
>>> You should use #ifdef __P000 instead; that way you could migrate
>>> architectures one by one.
>>
>> If vm_get_page_prot() gets moved into all platforms, what would be the
>> preferred method to organize this patch series?
>>
>> 1. Move protection_map[] inside platforms with ARCH_HAS_VM_GET_PAGE_PROT (current patch 1)
>> 2. Convert the remaining platforms to use ARCH_HAS_VM_GET_PAGE_PROT one after the other
>> 3. Drop ARCH_HAS_VM_GET_PAGE_PROT completely
>>
>> Using "#ifdef __P000" to wrap protection_map[] will leave two different #ifdefs
>> in flight, i.e. __P000 and ARCH_HAS_VM_GET_PAGE_PROT, in the generic mmap code until
>> both get dropped eventually. But using "#ifdef __P000" does enable splitting the
>> first patch into multiple changes, one for each individual platform.
> 
>  From previous discussions and based on Christoph's suggestion, I guess 
> we now aim at getting vm_get_page_prot() moved into all platforms 
> together with protection_map[]. Therefore the use of #ifdef __P000 could 
> be very temporary at the beginning of the series:
> 1. Guard the generic protection_map[] with #ifdef __P000
> 2. Move protection_map[] into architecture code and drop __Pxxx/__Sxxx for arm64
> 3. Same for sparc
> 4. Same for x86
> 5. Convert all of powerpc to ARCH_HAS_VM_GET_PAGE_PROT and move 
> protection_map[] into architecture code and drop __Pxxx/__Sxxx
> 6. Replace #ifdef __P000 by #ifdef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
> 7. Convert all remaining platforms to CONFIG_ARCH_HAS_VM_GET_PAGE_PROT one 
> by one (but keep a protection_map[] table, don't use switch/case)
> 8. Drop ARCH_HAS_VM_GET_PAGE_PROT completely.
> 
> Eventually you can squash step 6 into step 8.

Keeping the individual platform changes in separate patches will make
the series cleaner and also much easier to review. But the flow
explained above sounds good to me. I will work on these changes.

Patch

diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 62e0ebeed720..9b165117a454 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -89,24 +89,6 @@  extern bool arm64_use_ng_mappings;
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
-#define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_READONLY
-#define __P011  PAGE_READONLY
-#define __P100  PAGE_READONLY_EXEC	/* PAGE_EXECONLY if Enhanced PAN */
-#define __P101  PAGE_READONLY_EXEC
-#define __P110  PAGE_READONLY_EXEC
-#define __P111  PAGE_READONLY_EXEC
-
-#define __S000  PAGE_NONE
-#define __S001  PAGE_READONLY
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY_EXEC	/* PAGE_EXECONLY if Enhanced PAN */
-#define __S101  PAGE_READONLY_EXEC
-#define __S110  PAGE_SHARED_EXEC
-#define __S111  PAGE_SHARED_EXEC
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_PROT_H */
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 78e9490f748d..8f5b7ce857ed 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -13,6 +13,27 @@ 
 #include <asm/cpufeature.h>
 #include <asm/page.h>
 
+static pgprot_t protection_map[16] __ro_after_init = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_READONLY,
+	[VM_WRITE | VM_READ]				= PAGE_READONLY,
+	/* PAGE_EXECONLY if Enhanced PAN */
+	[VM_EXEC]					= PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_WRITE]				= PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_READONLY_EXEC,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	/* PAGE_EXECONLY if Enhanced PAN */
+	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
+};
+
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This might go
  * away in the future.
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d564d0ecd4cd..8ed2a80c896e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -21,6 +21,7 @@  struct mm_struct;
 #endif /* !CONFIG_PPC_BOOK3S */
 
 /* Note due to the way vm flags are laid out, the bits are XWR */
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
 #define __P000	PAGE_NONE
 #define __P001	PAGE_READONLY
 #define __P010	PAGE_COPY
@@ -38,6 +39,7 @@  struct mm_struct;
 #define __S101	PAGE_READONLY_X
 #define __S110	PAGE_SHARED_X
 #define __S111	PAGE_SHARED_X
+#endif
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 7b9966402b25..d3b019b95c1d 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -551,6 +551,26 @@  unsigned long memremap_compat_align(void)
 EXPORT_SYMBOL_GPL(memremap_compat_align);
 #endif
 
+/* Note due to the way vm flags are laid out, the bits are XWR */
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_COPY,
+	[VM_WRITE | VM_READ]				= PAGE_COPY,
+	[VM_EXEC]					= PAGE_READONLY_X,
+	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
+};
+
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
 	unsigned long prot = pgprot_val(protection_map[vm_flags &
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 4679e45c8348..a779418ceba9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -187,25 +187,6 @@  bool kern_addr_valid(unsigned long addr);
 #define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
 #define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
 
-/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
-#define __P000	__pgprot(0)
-#define __P001	__pgprot(0)
-#define __P010	__pgprot(0)
-#define __P011	__pgprot(0)
-#define __P100	__pgprot(0)
-#define __P101	__pgprot(0)
-#define __P110	__pgprot(0)
-#define __P111	__pgprot(0)
-
-#define __S000	__pgprot(0)
-#define __S001	__pgprot(0)
-#define __S010	__pgprot(0)
-#define __S011	__pgprot(0)
-#define __S100	__pgprot(0)
-#define __S101	__pgprot(0)
-#define __S110	__pgprot(0)
-#define __S111	__pgprot(0)
-
 #ifndef __ASSEMBLY__
 
 pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f6174df2d5af..d6faee23c77d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2634,6 +2634,9 @@  void vmemmap_free(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+static pgprot_t protection_map[16] __ro_after_init;
+
 static void prot_init_common(unsigned long page_none,
 			     unsigned long page_shared,
 			     unsigned long page_copy,
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index bdaf8391e2e0..aa174fed3a71 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -230,25 +230,6 @@  enum page_cache_mode {
 
 #endif	/* __ASSEMBLY__ */
 
-/*         xwr */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_EXEC
-#define __P101	PAGE_READONLY_EXEC
-#define __P110	PAGE_COPY_EXEC
-#define __P111	PAGE_COPY_EXEC
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_EXEC
-#define __S101	PAGE_READONLY_EXEC
-#define __S110	PAGE_SHARED_EXEC
-#define __S111	PAGE_SHARED_EXEC
-
 /*
  * early identity mapping  pte attrib macros.
  */
diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c
index 763742782286..7eca1b009af6 100644
--- a/arch/x86/mm/pgprot.c
+++ b/arch/x86/mm/pgprot.c
@@ -4,6 +4,25 @@ 
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
+static pgprot_t protection_map[16] __ro_after_init = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_COPY,
+	[VM_WRITE | VM_READ]				= PAGE_COPY,
+	[VM_EXEC]					= PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
+	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
+};
+
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
 	unsigned long val = pgprot_val(protection_map[vm_flags &
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bc8f326be0ce..2254c1980c8e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -420,11 +420,13 @@  extern unsigned int kobjsize(const void *objp);
 #endif
 #define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
 
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
 /*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
 extern pgprot_t protection_map[16];
+#endif
 
 /*
  * The default fault flags that should be used by most of the
diff --git a/mm/mmap.c b/mm/mmap.c
index 61e6135c54ef..e66920414945 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -101,6 +101,7 @@  static void unmap_region(struct mm_struct *mm,
  *								w: (no) no
  *								x: (yes) yes
  */
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
 pgprot_t protection_map[16] __ro_after_init = {
 	[VM_NONE]					= __P000,
 	[VM_READ]					= __P001,
@@ -120,7 +121,6 @@  pgprot_t protection_map[16] __ro_after_init = {
 	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __S111
 };
 
-#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
 	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];