[RFC,V2,01/10] mm/mmap: Dynamically initialize protection_map[]

Message ID 1627281445-12445-2-git-send-email-anshuman.khandual@arm.com (mailing list archive)
State New, archived
Series arm64/mm: Enable FEAT_LPA2 (52 bits PA support on 4K|16K pages)

Commit Message

Anshuman Khandual July 26, 2021, 6:37 a.m. UTC
The protection_map[] elements (__PXXX and __SXXX) might contain runtime
variables on certain platforms like arm64, preventing a successful build
with the current static initialization. So defer the initialization
until mmap_init() via a new helper init_protection_map().

Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
---
 mm/mmap.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

Comments

Catalin Marinas Aug. 5, 2021, 5:03 p.m. UTC | #1
On Mon, Jul 26, 2021 at 12:07:16PM +0530, Anshuman Khandual wrote:
> The protection_map[] elements (__PXXX and __SXXX) might contain runtime
> variables on certain platforms like arm64, preventing a successful build
> with the current static initialization. So defer the initialization
> until mmap_init() via a new helper init_protection_map().
> 
> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>  mm/mmap.c | 26 ++++++++++++++++++++++----
>  1 file changed, 22 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/mmap.c b/mm/mmap.c
> index ca54d36..a95b078 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -100,10 +100,7 @@ static void unmap_region(struct mm_struct *mm,
>   *								w: (no) no
>   *								x: (yes) yes
>   */
> -pgprot_t protection_map[16] __ro_after_init = {
> -	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
> -	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
> -};
> +pgprot_t protection_map[16] __ro_after_init;

Mips, x86, sparc, arm32, m68k all adjust protection_map[] during boot.
Could we do something similar here and avoid changing the generic code?
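
For reference, the boot-time fixup those architectures do follows roughly
the pattern below. This is a minimal sketch modeled on the loop in arm32's
build_mem_type_table(); adjust_protection_map() and user_pgprot are
illustrative names, standing in for the platform's runtime-detected bits.

static void __init adjust_protection_map(pteval_t user_pgprot)
{
	int i;

	/* OR the runtime-detected bits into each static entry */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++) {
		pteval_t v = pgprot_val(protection_map[i]);

		protection_map[i] = __pgprot(v | user_pgprot);
	}
}
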
Anshuman Khandual Aug. 12, 2021, 9:15 a.m. UTC | #2
On 8/5/21 10:33 PM, Catalin Marinas wrote:
> On Mon, Jul 26, 2021 at 12:07:16PM +0530, Anshuman Khandual wrote:
>> The protection_map[] elements (__PXXX and __SXXX) might contain runtime
>> variables on certain platforms like arm64, preventing a successful build
>> with the current static initialization. So defer the initialization
>> until mmap_init() via a new helper init_protection_map().
>>
>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>> ---
>>  mm/mmap.c | 26 ++++++++++++++++++++++----
>>  1 file changed, 22 insertions(+), 4 deletions(-)
>>
>> diff --git a/mm/mmap.c b/mm/mmap.c
>> index ca54d36..a95b078 100644
>> --- a/mm/mmap.c
>> +++ b/mm/mmap.c
>> @@ -100,10 +100,7 @@ static void unmap_region(struct mm_struct *mm,
>>   *								w: (no) no
>>   *								x: (yes) yes
>>   */
>> -pgprot_t protection_map[16] __ro_after_init = {
>> -	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
>> -	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
>> -};
>> +pgprot_t protection_map[16] __ro_after_init;
> 
> Mips, x86, sparc, arm32, m68k all adjust protection_map[] during boot.
> Could we do something similar here and avoid changing the generic code?

If __P[000..111] and __S[000..111] were made dummy values (e.g. 0 or some
standard prot, temporarily), the compilation problem could be avoided.
Later, in the platform code, protection_map[] could be adjusted with the
actual prot values, which involve the runtime variable.
Anshuman Khandual Aug. 13, 2021, 7:16 a.m. UTC | #3
On 8/12/21 2:45 PM, Anshuman Khandual wrote:
> 
> 
> On 8/5/21 10:33 PM, Catalin Marinas wrote:
>> On Mon, Jul 26, 2021 at 12:07:16PM +0530, Anshuman Khandual wrote:
>>> The protection_map[] elements (__PXXX and __SXXX) might contain runtime
>>> variables on certain platforms like arm64, preventing a successful build
>>> with the current static initialization. So defer the initialization
>>> until mmap_init() via a new helper init_protection_map().
>>>
>>> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
>>> ---
>>>  mm/mmap.c | 26 ++++++++++++++++++++++----
>>>  1 file changed, 22 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/mm/mmap.c b/mm/mmap.c
>>> index ca54d36..a95b078 100644
>>> --- a/mm/mmap.c
>>> +++ b/mm/mmap.c
>>> @@ -100,10 +100,7 @@ static void unmap_region(struct mm_struct *mm,
>>>   *								w: (no) no
>>>   *								x: (yes) yes
>>>   */
>>> -pgprot_t protection_map[16] __ro_after_init = {
>>> -	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
>>> -	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
>>> -};
>>> +pgprot_t protection_map[16] __ro_after_init;
>>
>> Mips, x86, sparc, arm32, m68k all adjust protection_map[] during boot.
>> Could we do something similar here and avoid changing the generic code?
> 
> If __P[000..111] and __S[000..111] were made dummy values (e.g. 0 or some
> standard prot, temporarily), the compilation problem could be avoided.
> Later, in the platform code, protection_map[] could be adjusted with the
> actual prot values, which involve the runtime variable.
> 

The following change seems to work without any problem. Using __pgprot(0)
as a temporary protection value should be okay, as vm_get_page_prot(), which
uses protection_map[], should never be called before mem_init()?
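
For context, vm_get_page_prot() amounts to an indexed lookup into
protection_map[]. A rough sketch of the mm/mmap.c code of this era
(the arch_* hooks default to no-ops unless a platform overrides them):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));

	return arch_filter_pgprot(ret);
}

Any caller running before the array is populated would silently get an
empty pgprot back, which is why calling init_protection_map() no later
than mem_init() matters.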

 arch/arm64/include/asm/pgtable-prot.h | 34 +++++++++++++++++-----------------
 arch/arm64/mm/init.c                  | 22 ++++++++++++++++++++++
 2 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 7032f04..539503a 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -88,23 +88,23 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
 
-#define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_READONLY
-#define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
-#define __P101  PAGE_READONLY_EXEC
-#define __P110  PAGE_READONLY_EXEC
-#define __P111  PAGE_READONLY_EXEC
-
-#define __S000  PAGE_NONE
-#define __S001  PAGE_READONLY
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
-#define __S101  PAGE_READONLY_EXEC
-#define __S110  PAGE_SHARED_EXEC
-#define __S111  PAGE_SHARED_EXEC
+#define __P000  __pgprot(0)
+#define __P001  __pgprot(0)
+#define __P010  __pgprot(0)
+#define __P011  __pgprot(0)
+#define __P100  __pgprot(0)
+#define __P101  __pgprot(0)
+#define __P110  __pgprot(0)
+#define __P111  __pgprot(0)
+
+#define __S000  __pgprot(0)
+#define __S001  __pgprot(0)
+#define __S010  __pgprot(0)
+#define __S011  __pgprot(0)
+#define __S100  __pgprot(0)
+#define __S101  __pgprot(0)
+#define __S110  __pgprot(0)
+#define __S111  __pgprot(0)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8490ed2..51662b3 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -448,6 +448,27 @@ void __init bootmem_init(void)
 	memblock_dump_all();
 }
 
+static void init_protection_map(void)
+{
+	protection_map[0] = PAGE_NONE;
+	protection_map[1] = PAGE_READONLY;
+	protection_map[2] = PAGE_READONLY;
+	protection_map[3] = PAGE_READONLY;
+	protection_map[4] = PAGE_EXECONLY;
+	protection_map[5] = PAGE_READONLY_EXEC;
+	protection_map[6] = PAGE_READONLY_EXEC;
+	protection_map[7] = PAGE_READONLY_EXEC;
+
+	protection_map[8] = PAGE_NONE;
+	protection_map[9] = PAGE_READONLY;
+	protection_map[10] = PAGE_SHARED;
+	protection_map[11] = PAGE_SHARED;
+	protection_map[12] = PAGE_EXECONLY;
+	protection_map[13] = PAGE_READONLY_EXEC;
+	protection_map[14] = PAGE_SHARED_EXEC;
+	protection_map[15] = PAGE_SHARED_EXEC;
+}
+
 /*
  * mem_init() marks the free areas in the mem_map and tells us how much memory
  * is free.  This is done after various parts of the system have claimed their
@@ -489,6 +510,7 @@ void __init mem_init(void)
 		 */
 		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
 	}
+	init_protection_map();
 }
 
 void free_initmem(void)

Patch

diff --git a/mm/mmap.c b/mm/mmap.c
index ca54d36..a95b078 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -100,10 +100,7 @@ static void unmap_region(struct mm_struct *mm,
  *								w: (no) no
  *								x: (yes) yes
  */
-pgprot_t protection_map[16] __ro_after_init = {
-	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
+pgprot_t protection_map[16] __ro_after_init;
 
 #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
 static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
@@ -3708,6 +3705,26 @@ void mm_drop_all_locks(struct mm_struct *mm)
 	mutex_unlock(&mm_all_locks_mutex);
 }
 
+static void init_protection_map(void)
+{
+	protection_map[0] = __P000;
+	protection_map[1] = __P001;
+	protection_map[2] = __P010;
+	protection_map[3] = __P011;
+	protection_map[4] = __P100;
+	protection_map[5] = __P101;
+	protection_map[6] = __P110;
+	protection_map[7] = __P111;
+	protection_map[8] = __S000;
+	protection_map[9] = __S001;
+	protection_map[10] = __S010;
+	protection_map[11] = __S011;
+	protection_map[12] = __S100;
+	protection_map[13] = __S101;
+	protection_map[14] = __S110;
+	protection_map[15] = __S111;
+}
+
 /*
  * initialise the percpu counter for VM
  */
@@ -3715,6 +3732,7 @@ void __init mmap_init(void)
 {
 	int ret;
 
+	init_protection_map();
 	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 	VM_BUG_ON(ret);
 }