[v2,1/4] arm64: mm: Don't remap pgtables per-cont(pte|pmd) block

Message ID 20240404143308.2224141-2-ryan.roberts@arm.com (mailing list archive)
State New, archived
Series Speed up boot with faster linear map creation

Commit Message

Ryan Roberts April 4, 2024, 2:33 p.m. UTC
A large part of the kernel boot time is creating the kernel linear map
page tables. When rodata=full, all memory is mapped by pte. And when
there is lots of physical ram, there are lots of pte tables to populate.
The primary cost associated with this is mapping and unmapping the pte
table memory in the fixmap; at unmap time, the TLB entry must be
invalidated and this is expensive.

Previously, each pmd and pte table was fixmapped/fixunmapped for each
cont(pte|pmd) block of mappings (16 entries with 4K granule). This means
we ended up issuing 32 TLBIs per (pmd|pte) table during the population
phase.

Let's fix that, and fixmap/fixunmap each page once per population, for a
saving of 31 TLBIs per (pmd|pte) table. This gives a significant boot
speedup.
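
To make the arithmetic concrete (4K granule): a pte table holds 512 entries,
i.e. 32 cont blocks of 16 ptes each, so previously each table cost 32
fixmap/fixunmap cycles (one TLBI at each unmap); now it costs one, hence the
saving of 31 TLBIs per table. In alloc_init_cont_pte() the shape of the change
is roughly the following (a simplified sketch; see the diff below for the real
thing):

	/* Before: init_pte() fixmaps and fixunmaps the pte table per cont block */
	do {
		...
		init_pte(pmdp, addr, next, phys, __prot);
		phys += next - addr;
	} while (addr = next, addr != end);

	/* After: map the pte table once, walk it, then unmap once */
	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		...
		ptep = init_pte(ptep, addr, next, phys, __prot);
		phys += next - addr;
	} while (addr = next, addr != end);
	pte_clear_fixmap();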

Execution time of map_mem(), which creates the kernel linear map page
tables, was measured on different machines with different RAM configs:

               | Apple M2 VM | Ampere Altra| Ampere Altra| Ampere Altra
               | VM, 16G     | VM, 64G     | VM, 256G    | Metal, 512G
---------------|-------------|-------------|-------------|-------------
               |   ms    (%) |   ms    (%) |   ms    (%) |    ms    (%)
---------------|-------------|-------------|-------------|-------------
before         |  153   (0%) | 2227   (0%) | 8798   (0%) | 17442   (0%)
after          |   77 (-49%) |  431 (-81%) | 1727 (-80%) |  3796 (-78%)

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
Tested-by: Eric Chanudet <echanude@redhat.com>
---
 arch/arm64/mm/mmu.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

Comments

Mark Rutland April 10, 2024, 9:46 a.m. UTC | #1
On Thu, Apr 04, 2024 at 03:33:05PM +0100, Ryan Roberts wrote:
> A large part of the kernel boot time is creating the kernel linear map
> page tables. When rodata=full, all memory is mapped by pte. And when
> there is lots of physical ram, there are lots of pte tables to populate.
> The primary cost associated with this is mapping and unmapping the pte
> table memory in the fixmap; at unmap time, the TLB entry must be
> invalidated and this is expensive.
> 
> Previously, each pmd and pte table was fixmapped/fixunmapped for each
> cont(pte|pmd) block of mappings (16 entries with 4K granule). This means
> we ended up issuing 32 TLBIs per (pmd|pte) table during the population
> phase.
> 
> Let's fix that, and fixmap/fixunmap each page once per population, for a
> saving of 31 TLBIs per (pmd|pte) table. This gives a significant boot
> speedup.
> 
> Execution time of map_mem(), which creates the kernel linear map page
> tables, was measured on different machines with different RAM configs:
> 
>                | Apple M2 VM | Ampere Altra| Ampere Altra| Ampere Altra
>                | VM, 16G     | VM, 64G     | VM, 256G    | Metal, 512G
> ---------------|-------------|-------------|-------------|-------------
>                |   ms    (%) |   ms    (%) |   ms    (%) |    ms    (%)
> ---------------|-------------|-------------|-------------|-------------
> before         |  153   (0%) | 2227   (0%) | 8798   (0%) | 17442   (0%)
> after          |   77 (-49%) |  431 (-81%) | 1727 (-80%) |  3796 (-78%)
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> Tested-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
> Tested-by: Eric Chanudet <echanude@redhat.com>
> ---
>  arch/arm64/mm/mmu.c | 32 ++++++++++++++++++--------------
>  1 file changed, 18 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 495b732d5af3..fd91b5bdb514 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -172,12 +172,9 @@ bool pgattr_change_is_safe(u64 old, u64 new)
>  	return ((old ^ new) & ~mask) == 0;
>  }
>  
> -static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
> -		     phys_addr_t phys, pgprot_t prot)
> +static pte_t *init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
> +		       phys_addr_t phys, pgprot_t prot)
>  {
> -	pte_t *ptep;
> -
> -	ptep = pte_set_fixmap_offset(pmdp, addr);
>  	do {
>  		pte_t old_pte = __ptep_get(ptep);
>  
> @@ -193,7 +190,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
>  		phys += PAGE_SIZE;
>  	} while (ptep++, addr += PAGE_SIZE, addr != end);
>  
> -	pte_clear_fixmap();
> +	return ptep;
>  }
>  
>  static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
> @@ -204,6 +201,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  {
>  	unsigned long next;
>  	pmd_t pmd = READ_ONCE(*pmdp);
> +	pte_t *ptep;
>  
>  	BUG_ON(pmd_sect(pmd));
>  	if (pmd_none(pmd)) {
> @@ -219,6 +217,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  	}
>  	BUG_ON(pmd_bad(pmd));
>  
> +	ptep = pte_set_fixmap_offset(pmdp, addr);
>  	do {
>  		pgprot_t __prot = prot;
>  
> @@ -229,20 +228,20 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>  		    (flags & NO_CONT_MAPPINGS) == 0)
>  			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>  
> -		init_pte(pmdp, addr, next, phys, __prot);
> +		ptep = init_pte(ptep, addr, next, phys, __prot);
>  
>  		phys += next - addr;
>  	} while (addr = next, addr != end);

I reckon it might be better to leave init_pte() returning void, and move the
ptep along here, e.g.

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		...

		init_pte(ptep, addr, next, phys, __prot);

		ptep += pte_index(next) - pte_index(addr);
		phys += next - addr;
	} while (addr = next, addr != end);


... as that keeps the relationship between 'ptep' and 'phys' clear since
they're manipulated in the same way, adjacent to one another.
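
With that, init_pte() would presumably stay close to its current shape, just
taking the ptep and returning void, something like (sketch only, body
otherwise unchanged from your patch):

	static void init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
			     phys_addr_t phys, pgprot_t prot)
	{
		do {
			pte_t old_pte = __ptep_get(ptep);

			...

			phys += PAGE_SIZE;
		} while (ptep++, addr += PAGE_SIZE, addr != end);
	}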

Regardless this looks good, so with that change or as-is:

Acked-by: Mark Rutland <mark.rutland@arm.com>

... though I would prefer with that change. ;)

Mark.
Ryan Roberts April 10, 2024, 10:27 a.m. UTC | #2
On 10/04/2024 10:46, Mark Rutland wrote:
> On Thu, Apr 04, 2024 at 03:33:05PM +0100, Ryan Roberts wrote:
>> A large part of the kernel boot time is creating the kernel linear map
>> page tables. When rodata=full, all memory is mapped by pte. And when
>> there is lots of physical ram, there are lots of pte tables to populate.
>> The primary cost associated with this is mapping and unmapping the pte
>> table memory in the fixmap; at unmap time, the TLB entry must be
>> invalidated and this is expensive.
>>
>> Previously, each pmd and pte table was fixmapped/fixunmapped for each
>> cont(pte|pmd) block of mappings (16 entries with 4K granule). This means
>> we ended up issuing 32 TLBIs per (pmd|pte) table during the population
>> phase.
>>
>> Let's fix that, and fixmap/fixunmap each page once per population, for a
>> saving of 31 TLBIs per (pmd|pte) table. This gives a significant boot
>> speedup.
>>
>> Execution time of map_mem(), which creates the kernel linear map page
>> tables, was measured on different machines with different RAM configs:
>>
>>                | Apple M2 VM | Ampere Altra| Ampere Altra| Ampere Altra
>>                | VM, 16G     | VM, 64G     | VM, 256G    | Metal, 512G
>> ---------------|-------------|-------------|-------------|-------------
>>                |   ms    (%) |   ms    (%) |   ms    (%) |    ms    (%)
>> ---------------|-------------|-------------|-------------|-------------
>> before         |  153   (0%) | 2227   (0%) | 8798   (0%) | 17442   (0%)
>> after          |   77 (-49%) |  431 (-81%) | 1727 (-80%) |  3796 (-78%)
>>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> Tested-by: Itaru Kitayama <itaru.kitayama@fujitsu.com>
>> Tested-by: Eric Chanudet <echanude@redhat.com>
>> ---
>>  arch/arm64/mm/mmu.c | 32 ++++++++++++++++++--------------
>>  1 file changed, 18 insertions(+), 14 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index 495b732d5af3..fd91b5bdb514 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -172,12 +172,9 @@ bool pgattr_change_is_safe(u64 old, u64 new)
>>  	return ((old ^ new) & ~mask) == 0;
>>  }
>>  
>> -static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
>> -		     phys_addr_t phys, pgprot_t prot)
>> +static pte_t *init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
>> +		       phys_addr_t phys, pgprot_t prot)
>>  {
>> -	pte_t *ptep;
>> -
>> -	ptep = pte_set_fixmap_offset(pmdp, addr);
>>  	do {
>>  		pte_t old_pte = __ptep_get(ptep);
>>  
>> @@ -193,7 +190,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
>>  		phys += PAGE_SIZE;
>>  	} while (ptep++, addr += PAGE_SIZE, addr != end);
>>  
>> -	pte_clear_fixmap();
>> +	return ptep;
>>  }
>>  
>>  static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>> @@ -204,6 +201,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>  {
>>  	unsigned long next;
>>  	pmd_t pmd = READ_ONCE(*pmdp);
>> +	pte_t *ptep;
>>  
>>  	BUG_ON(pmd_sect(pmd));
>>  	if (pmd_none(pmd)) {
>> @@ -219,6 +217,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>  	}
>>  	BUG_ON(pmd_bad(pmd));
>>  
>> +	ptep = pte_set_fixmap_offset(pmdp, addr);
>>  	do {
>>  		pgprot_t __prot = prot;
>>  
>> @@ -229,20 +228,20 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
>>  		    (flags & NO_CONT_MAPPINGS) == 0)
>>  			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
>>  
>> -		init_pte(pmdp, addr, next, phys, __prot);
>> +		ptep = init_pte(ptep, addr, next, phys, __prot);
>>  
>>  		phys += next - addr;
>>  	} while (addr = next, addr != end);
> 
> I reckon it might be better to leave init_pte() returning void, and move the
> ptep along here, e.g.
> 
> 	ptep = pte_set_fixmap_offset(pmdp, addr);
> 	do {
> 		...
> 
> 		init_pte(ptep, addr, next, phys, __prot);
> 
> 		ptep += pte_index(next) - pte_index(addr);
> 		phys += next - addr;
> 	} while (addr = next, addr != end);
> 
> 
> ... as that keeps the relationship between 'ptep' and 'phys' clear since
> they're manipulated in the same way, adjacent to one another.
> 
> Regardless this looks good, so with that change or as-is:
> 
> Acked-by: Mark Rutland <mark.rutland@arm.com>
> 
> ... though I would prefer with that change. ;)

Yep, will change. And I'll do the same for init_pmd() too.
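
For the pmd level that would presumably end up looking something like this in
alloc_init_cont_pmd() (sketch only, not the final version):

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		...

		init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc, flags);

		pmdp += pmd_index(next) - pmd_index(addr);
		phys += next - addr;
	} while (addr = next, addr != end);

	pmd_clear_fixmap();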

> 
> Mark.

Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 495b732d5af3..fd91b5bdb514 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -172,12 +172,9 @@  bool pgattr_change_is_safe(u64 old, u64 new)
 	return ((old ^ new) & ~mask) == 0;
 }
 
-static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot)
+static pte_t *init_pte(pte_t *ptep, unsigned long addr, unsigned long end,
+		       phys_addr_t phys, pgprot_t prot)
 {
-	pte_t *ptep;
-
-	ptep = pte_set_fixmap_offset(pmdp, addr);
 	do {
 		pte_t old_pte = __ptep_get(ptep);
 
@@ -193,7 +190,7 @@  static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		phys += PAGE_SIZE;
 	} while (ptep++, addr += PAGE_SIZE, addr != end);
 
-	pte_clear_fixmap();
+	return ptep;
 }
 
 static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
@@ -204,6 +201,7 @@  static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 {
 	unsigned long next;
 	pmd_t pmd = READ_ONCE(*pmdp);
+	pte_t *ptep;
 
 	BUG_ON(pmd_sect(pmd));
 	if (pmd_none(pmd)) {
@@ -219,6 +217,7 @@  static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 	}
 	BUG_ON(pmd_bad(pmd));
 
+	ptep = pte_set_fixmap_offset(pmdp, addr);
 	do {
 		pgprot_t __prot = prot;
 
@@ -229,20 +228,20 @@  static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 		    (flags & NO_CONT_MAPPINGS) == 0)
 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-		init_pte(pmdp, addr, next, phys, __prot);
+		ptep = init_pte(ptep, addr, next, phys, __prot);
 
 		phys += next - addr;
 	} while (addr = next, addr != end);
+
+	pte_clear_fixmap();
 }
 
-static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
-		     phys_addr_t phys, pgprot_t prot,
-		     phys_addr_t (*pgtable_alloc)(int), int flags)
+static pmd_t *init_pmd(pmd_t *pmdp, unsigned long addr, unsigned long end,
+		       phys_addr_t phys, pgprot_t prot,
+		       phys_addr_t (*pgtable_alloc)(int), int flags)
 {
 	unsigned long next;
-	pmd_t *pmdp;
 
-	pmdp = pmd_set_fixmap_offset(pudp, addr);
 	do {
 		pmd_t old_pmd = READ_ONCE(*pmdp);
 
@@ -269,7 +268,7 @@  static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
 		phys += next - addr;
 	} while (pmdp++, addr = next, addr != end);
 
-	pmd_clear_fixmap();
+	return pmdp;
 }
 
 static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
@@ -279,6 +278,7 @@  static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 {
 	unsigned long next;
 	pud_t pud = READ_ONCE(*pudp);
+	pmd_t *pmdp;
 
 	/*
 	 * Check for initial section mappings in the pgd/pud.
@@ -297,6 +297,7 @@  static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 	}
 	BUG_ON(pud_bad(pud));
 
+	pmdp = pmd_set_fixmap_offset(pudp, addr);
 	do {
 		pgprot_t __prot = prot;
 
@@ -307,10 +308,13 @@  static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 		    (flags & NO_CONT_MAPPINGS) == 0)
 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
+		pmdp = init_pmd(pmdp, addr, next, phys, __prot, pgtable_alloc,
+				flags);
 
 		phys += next - addr;
 	} while (addr = next, addr != end);
+
+	pmd_clear_fixmap();
 }
 
 static void alloc_init_pud(p4d_t *p4dp, unsigned long addr, unsigned long end,