diff mbox series

[v8,3/8] xen/arm: Fold mmu_init_secondary_cpu() to head.S

Message ID 20231023021345.1731436-4-Henry.Wang@arm.com (mailing list archive)
State Superseded
Headers show
Series xen/arm: Split MMU code as the preparation of MPU work | expand

Commit Message

Henry Wang Oct. 23, 2023, 2:13 a.m. UTC
Currently mmu_init_secondary_cpu() only enforces the page table
should not contain mapping that are both Writable and eXecutables
after boot. To ease the arch/arm/mm.c split work, fold this function
to head.S.

For arm32, introduce an assembly macro pt_enforce_wxn. The macro is
called before secondary CPUs jumping into the C world.

For arm64, set the SCTLR_Axx_ELx_WXN flag right when the MMU is
enabled. This would avoid the extra TLB flush and SCTLR dance.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
Co-authored-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
---
v8:
- Change the setting of SCTLR_Axx_ELx_WXN for arm64 to set the
  flag right when the MMU is enabled.
v7:
- No change.
v6:
- New patch.
---
 xen/arch/arm/arm32/head.S     | 20 ++++++++++++++++++++
 xen/arch/arm/arm64/mmu/head.S | 18 +++++++++++-------
 xen/arch/arm/include/asm/mm.h |  2 --
 xen/arch/arm/mm.c             |  6 ------
 xen/arch/arm/smpboot.c        |  2 --
 5 files changed, 31 insertions(+), 17 deletions(-)

Comments

Julien Grall Oct. 31, 2023, 6:29 p.m. UTC | #1
Hi Henry,

+Ayan

On 23/10/2023 03:13, Henry Wang wrote:
> Currently mmu_init_secondary_cpu() only enforces the page table
> should not contain mapping that are both Writable and eXecutables
> after boot. To ease the arch/arm/mm.c split work, fold this function
> to head.S.
> 
> For arm32, introduce an assembly macro pt_enforce_wxn. The macro is
> called before secondary CPUs jumping into the C world.
> 
> For arm64, set the SCTLR_Axx_ELx_WXN flag right when the MMU is
> enabled. This would avoid the extra TLB flush and SCTLR dance.

For a random reader, it is not clear why you can't set WnX early for 
arm32 as well. I think it would be helpful to explain the difference. I.e. 
at the point the MMU is enabled, the page-tables may still contain 
mapping which are writable and executable.

> 
> Signed-off-by: Henry Wang <Henry.Wang@arm.com>
> Co-authored-by: Julien Grall <jgrall@amazon.com>
> Signed-off-by: Julien Grall <jgrall@amazon.com>
> ---
> v8:
> - Change the setting of SCTLR_Axx_ELx_WXN for arm64 to set the
>    flag right when the MMU is enabled.
> v7:
> - No change.
> v6:
> - New patch.
> ---
>   xen/arch/arm/arm32/head.S     | 20 ++++++++++++++++++++
>   xen/arch/arm/arm64/mmu/head.S | 18 +++++++++++-------
>   xen/arch/arm/include/asm/mm.h |  2 --
>   xen/arch/arm/mm.c             |  6 ------
>   xen/arch/arm/smpboot.c        |  2 --
>   5 files changed, 31 insertions(+), 17 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 33b038e7e0..39218cf15f 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -83,6 +83,25 @@
>           isb
>   .endm
>   
> +/*
> + * Enforce Xen page-tables do not contain mapping that are both
> + * Writable and eXecutables.
> + *
> + * This should be called on each secondary CPU.
> + */
> +.macro pt_enforce_wxn tmp
> +        mrc   CP32(\tmp, HSCTLR)
> +        orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
> +        dsb
> +        mcr   CP32(\tmp, HSCTLR)
> +        /*
> +         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
> +         * before flushing the TLBs.
> +         */
> +        isb
> +        flush_xen_tlb_local \tmp
> +.endm
> +
>   /*
>    * Common register usage in this file:
>    *   r0  -
> @@ -254,6 +273,7 @@ secondary_switched:
>           /* Use a virtual address to access the UART. */
>           mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
>   #endif
> +        pt_enforce_wxn r0

 From recent discussion on IRC, Ayan reminded me this patch [1]. 
Ideally, I would want to print a message just before to indicate that 
the bit is set. But I understand that this would need to be dropped in 
Ayan rework as we don't yet support early printk in enable_mmu().

While debugging an MMU issue on Arm32, I wrote a patch to sprinkle 
prints in the enable_mmu() code. I will clean-up the patch and send it.

I will add a print at that point. Meanwhile, I would move the call a few 
lines above? This will allow Ayan to drop [1].

>           PRINT("- Ready -\r\n")
>           /* Jump to C world */
>           mov_w r2, start_secondary
> diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
> index 88075ef083..df06cefbbe 100644
> --- a/xen/arch/arm/arm64/mmu/head.S
> +++ b/xen/arch/arm/arm64/mmu/head.S
> @@ -264,10 +264,11 @@ ENDPROC(create_page_tables)
>    * Inputs:
>    *   x0 : Physical address of the page tables.

The inputs list should be updated to mention what x1 means.

>    *
> - * Clobbers x0 - x4
> + * Clobbers x0 - x6

Below, you only seem to introduce x5. So shouldn't this be: "Clobbers x0 
- x5"?

>    */
>   enable_mmu:
>           mov   x4, x0
> +        mov   x5, x1
>           PRINT("- Turning on paging -\r\n")
>   
>           /*
> @@ -283,6 +284,7 @@ enable_mmu:
>           mrs   x0, SCTLR_EL2
>           orr   x0, x0, #SCTLR_Axx_ELx_M  /* Enable MMU */
>           orr   x0, x0, #SCTLR_Axx_ELx_C  /* Enable D-cache */
> +        orr   x0, x0, x5                /* Enable extra flags */
>           dsb   sy                     /* Flush PTE writes and finish reads */
>           msr   SCTLR_EL2, x0          /* now paging is enabled */
>           isb                          /* Now, flush the icache */
> @@ -297,16 +299,17 @@ ENDPROC(enable_mmu)
>    * Inputs:
>    *   lr : Virtual address to return to.
>    *
> - * Clobbers x0 - x5
> + * Clobbers x0 - x6
>    */
>   ENTRY(enable_secondary_cpu_mm)
> -        mov   x5, lr
> +        mov   x6, lr
>   
>           load_paddr x0, init_ttbr
>           ldr   x0, [x0]
>   
> +        mov   x1, #SCTLR_Axx_ELx_WXN        /* Enable WxN from the start */
>           bl    enable_mmu
> -        mov   lr, x5
> +        mov   lr, x6
>   
>           /* Return to the virtual address requested by the caller. */
>           ret
> @@ -320,14 +323,15 @@ ENDPROC(enable_secondary_cpu_mm)
>    * Inputs:
>    *   lr : Virtual address to return to.
>    *
> - * Clobbers x0 - x5
> + * Clobbers x0 - x6
>    */
>   ENTRY(enable_boot_cpu_mm)
> -        mov   x5, lr
> +        mov   x6, lr
>   
>           bl    create_page_tables
>           load_paddr x0, boot_pgtable
>   
> +        mov   x1, #0        /* No extra SCTLR flags */
>           bl    enable_mmu
>   
>           /*
> @@ -337,7 +341,7 @@ ENTRY(enable_boot_cpu_mm)
>           ldr   x0, =1f
>           br    x0
>   1:
> -        mov   lr, x5
> +        mov   lr, x6
>           /*
>            * The 1:1 map may clash with other parts of the Xen virtual memory
>            * layout. As it is not used anymore, remove it completely to
> diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
> index d25e59f828..163d22ecd3 100644
> --- a/xen/arch/arm/include/asm/mm.h
> +++ b/xen/arch/arm/include/asm/mm.h
> @@ -214,8 +214,6 @@ extern void remove_early_mappings(void);
>   /* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the
>    * new page table */
>   extern int init_secondary_pagetables(int cpu);
> -/* Switch secondary CPUS to its own pagetables and finalise MMU setup */
> -extern void mmu_init_secondary_cpu(void);
>   /*
>    * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
>    * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index b7eb3a6e08..923a90925c 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -326,12 +326,6 @@ void __init setup_pagetables(unsigned long boot_phys_offset)
>   #endif
>   }
>   
> -/* MMU setup for secondary CPUS (which already have paging enabled) */
> -void mmu_init_secondary_cpu(void)
> -{
> -    xen_pt_enforce_wnx();
> -}
> -
>   #ifdef CONFIG_ARM_32
>   /*
>    * Set up the direct-mapped xenheap:
> diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> index ec76de3cac..beb137d06e 100644
> --- a/xen/arch/arm/smpboot.c
> +++ b/xen/arch/arm/smpboot.c
> @@ -361,8 +361,6 @@ void start_secondary(void)
>        */
>       update_system_features(&current_cpu_data);
>   
> -    mmu_init_secondary_cpu();
> -
>       gic_init_secondary_cpu();
>   
>       set_current(idle_vcpu[cpuid]);

Cheers,

[1] 
https://lore.kernel.org/all/20230911135942.791206-2-ayan.kumar.halder@amd.com/
Henry Wang Nov. 1, 2023, 1:58 a.m. UTC | #2
Hi Julien,

> On Nov 1, 2023, at 02:29, Julien Grall <julien@xen.org> wrote:
> 
> Hi Henry,
> 
> +Ayan
> 
> On 23/10/2023 03:13, Henry Wang wrote:
>> Currently mmu_init_secondary_cpu() only enforces the page table
>> should not contain mapping that are both Writable and eXecutables
>> after boot. To ease the arch/arm/mm.c split work, fold this function
>> to head.S.
>> For arm32, introduce an assembly macro pt_enforce_wxn. The macro is
>> called before secondary CPUs jumping into the C world.
>> For arm64, set the SCTLR_Axx_ELx_WXN flag right when the MMU is
>> enabled. This would avoid the extra TLB flush and SCTLR dance.
> 
> For a random reader, it is not clear why you can't set WnX early for arm32 as well. I think it would be helpful to explain the difference. I.e. at the point the MMU is enabled, the page-tables may still contain mapping which are writable and executable.

Sounds good, I will add the suggested sentence.

>>  .endm
>>  +/*
>> + * Enforce Xen page-tables do not contain mapping that are both
>> + * Writable and eXecutables.
>> + *
>> + * This should be called on each secondary CPU.
>> + */
>> +.macro pt_enforce_wxn tmp
>> +        mrc   CP32(\tmp, HSCTLR)
>> +        orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
>> +        dsb
>> +        mcr   CP32(\tmp, HSCTLR)
>> +        /*
>> +         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
>> +         * before flushing the TLBs.
>> +         */
>> +        isb
>> +        flush_xen_tlb_local \tmp
>> +.endm
>> +
>>  /*
>>   * Common register usage in this file:
>>   *   r0  -
>> @@ -254,6 +273,7 @@ secondary_switched:
>>          /* Use a virtual address to access the UART. */
>>          mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
>>  #endif
>> +        pt_enforce_wxn r0
> 
> From recent discussion on IRC, Ayan reminded me this patch [1]. Ideally, I would want to print a message just before to indicate that the bit is set. But I understand that this would need to be dropped in Ayan rework as we don't yet support early printk in enable_mmu().
> 
> While debugging an MMU issue on Arm32, I wrote a patch to sprinkle prints in the enable_mmu() code. I will clean-up the patch and send it.

Just to make sure, your patch is for both Arm32 and Arm64, is my understanding correct?
If it is only for Arm32, do you need me adding the print for Arm64 as well in this patch?

> I will add a print at that point. Meanwhile, I would move the call a few lines above? This will allow Ayan to drop [1].

Yeah I will include Ayan’s change in this patch and add his sign-off.

>>          PRINT("- Ready -\r\n")
>>          /* Jump to C world */
>>          mov_w r2, start_secondary
>> diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
>> index 88075ef083..df06cefbbe 100644
>> --- a/xen/arch/arm/arm64/mmu/head.S
>> +++ b/xen/arch/arm/arm64/mmu/head.S
>> @@ -264,10 +264,11 @@ ENDPROC(create_page_tables)
>>   * Inputs:
>>   *   x0 : Physical address of the page tables.
> 
> The inputs list should be updated to mention what x1 means.

I will use “x1: Extra flags of the SCTLR.” if this looks good to you.

>>   *
>> - * Clobbers x0 - x4
>> + * Clobbers x0 - x6
> 
> Below, you only seem to introduce x5. So shouldn't this be: "Clobbers x0 - x5"?

Hmmm yes you are correct, I blindly copied the code from [2]. Sorry for the mess, I will
correct it in v9.

> Cheers,
> 
> [1] https://lore.kernel.org/all/20230911135942.791206-2-ayan.kumar.halder@amd.com/

[2] https://lore.kernel.org/xen-devel/4d7a9849-8990-8ddd-3531-93f4e2e262b1@xen.org/

Kind regards,
Henry

> 
> -- 
> Julien Grall
Julien Grall Nov. 1, 2023, 12:05 p.m. UTC | #3
On 01/11/2023 01:58, Henry Wang wrote:
> Hi Julien,

Hi,

>> On Nov 1, 2023, at 02:29, Julien Grall <julien@xen.org> wrote:
>>
>> Hi Henry,
>>
>> +Ayan
>>
>> On 23/10/2023 03:13, Henry Wang wrote:
>>> Currently mmu_init_secondary_cpu() only enforces the page table
>>> should not contain mapping that are both Writable and eXecutables
>>> after boot. To ease the arch/arm/mm.c split work, fold this function
>>> to head.S.
>>> For arm32, introduce an assembly macro pt_enforce_wxn. The macro is
>>> called before secondary CPUs jumping into the C world.
>>> For arm64, set the SCTLR_Axx_ELx_WXN flag right when the MMU is
>>> enabled. This would avoid the extra TLB flush and SCTLR dance.
>>
>> For a random reader, it is not clear why you can't set WnX early for arm32 as well. I think it would be helpful to explain the difference. I.e. at the point the MMU is enabled, the page-tables may still contain mapping which are writable and executable.
> 
> Sounds good, I will add the suggested sentence.
> 
>>>   .endm
>>>   +/*
>>> + * Enforce Xen page-tables do not contain mapping that are both
>>> + * Writable and eXecutables.
>>> + *
>>> + * This should be called on each secondary CPU.
>>> + */
>>> +.macro pt_enforce_wxn tmp
>>> +        mrc   CP32(\tmp, HSCTLR)
>>> +        orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
>>> +        dsb
>>> +        mcr   CP32(\tmp, HSCTLR)
>>> +        /*
>>> +         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
>>> +         * before flushing the TLBs.
>>> +         */
>>> +        isb
>>> +        flush_xen_tlb_local \tmp
>>> +.endm
>>> +
>>>   /*
>>>    * Common register usage in this file:
>>>    *   r0  -
>>> @@ -254,6 +273,7 @@ secondary_switched:
>>>           /* Use a virtual address to access the UART. */
>>>           mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
>>>   #endif
>>> +        pt_enforce_wxn r0
>>
>>  From recent discussion on IRC, Ayan reminded me this patch [1]. Ideally, I would want to print a message just before to indicate that the bit is set. But I understand that this would need to be droppped in Ayan rework as we don't yet support early printk in enable_mmu().
>>
>> While debugging an MMU issue on Arm32, I wrote a patch to sprinkle prints in the enable_mmu() code. I will clean-up the patch and send it.
> 
> Just to make sure, your patch is for both Arm32 and Arm64, is my understanding correct?

No it is only for arm32.

> If it is only for Arm32, do you need me adding the print for Arm64 as well in this patch?

No need. For arm64, we will enable WnX at the same time as the MMU. So 
we are already covered by the other prints.

> 
>> I will add a print at that point. Meanwhile, I would move the call a few lines above? This will allow Ayan to drop [1].
> 
> Yeah I will include Ayan’s change in this patch and add his sign-off.
> 
>>>           PRINT("- Ready -\r\n")
>>>           /* Jump to C world */
>>>           mov_w r2, start_secondary
>>> diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
>>> index 88075ef083..df06cefbbe 100644
>>> --- a/xen/arch/arm/arm64/mmu/head.S
>>> +++ b/xen/arch/arm/arm64/mmu/head.S
>>> @@ -264,10 +264,11 @@ ENDPROC(create_page_tables)
>>>    * Inputs:
>>>    *   x0 : Physical address of the page tables.
>>
>> The inputs list should be updated to mention what x1 means.
> 
> I will use “x1: Extra flags of the SCTLR.” if this looks good to you.

I am fine with that.

Cheers,
Henry Wang Nov. 1, 2023, 12:28 p.m. UTC | #4
Hi Julien,

> On Nov 1, 2023, at 20:05, Julien Grall <julien@xen.org> wrote:
>>> 
>>> While debugging an MMU issue on Arm32, I wrote a patch to sprinkle prints in the enable_mmu() code. I will clean-up the patch and send it.
>> Just to make sure, your patch is for both Arm32 and Arm64, is my understanding correct?
> 
> No it is only for arm32.
> 
>> If it is only for Arm32, do you need me adding the print for Arm64 as well in this patch?
> 
> No need. For arm64, we will enable WnX at the same time as the MMU. So we are already covered by the other prints.

Ok, thanks for the explanation.

Kind regards,
Henry
diff mbox series

Patch

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 33b038e7e0..39218cf15f 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -83,6 +83,25 @@ 
         isb
 .endm
 
+/*
+ * Enforce Xen page-tables do not contain mapping that are both
+ * Writable and eXecutables.
+ *
+ * This should be called on each secondary CPU.
+ */
+.macro pt_enforce_wxn tmp
+        mrc   CP32(\tmp, HSCTLR)
+        orr   \tmp, \tmp, #SCTLR_Axx_ELx_WXN
+        dsb
+        mcr   CP32(\tmp, HSCTLR)
+        /*
+         * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
+         * before flushing the TLBs.
+         */
+        isb
+        flush_xen_tlb_local \tmp
+.endm
+
 /*
  * Common register usage in this file:
  *   r0  -
@@ -254,6 +273,7 @@  secondary_switched:
         /* Use a virtual address to access the UART. */
         mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
 #endif
+        pt_enforce_wxn r0
         PRINT("- Ready -\r\n")
         /* Jump to C world */
         mov_w r2, start_secondary
diff --git a/xen/arch/arm/arm64/mmu/head.S b/xen/arch/arm/arm64/mmu/head.S
index 88075ef083..df06cefbbe 100644
--- a/xen/arch/arm/arm64/mmu/head.S
+++ b/xen/arch/arm/arm64/mmu/head.S
@@ -264,10 +264,11 @@  ENDPROC(create_page_tables)
  * Inputs:
  *   x0 : Physical address of the page tables.
  *
- * Clobbers x0 - x4
+ * Clobbers x0 - x6
  */
 enable_mmu:
         mov   x4, x0
+        mov   x5, x1
         PRINT("- Turning on paging -\r\n")
 
         /*
@@ -283,6 +284,7 @@  enable_mmu:
         mrs   x0, SCTLR_EL2
         orr   x0, x0, #SCTLR_Axx_ELx_M  /* Enable MMU */
         orr   x0, x0, #SCTLR_Axx_ELx_C  /* Enable D-cache */
+        orr   x0, x0, x5                /* Enable extra flags */
         dsb   sy                     /* Flush PTE writes and finish reads */
         msr   SCTLR_EL2, x0          /* now paging is enabled */
         isb                          /* Now, flush the icache */
@@ -297,16 +299,17 @@  ENDPROC(enable_mmu)
  * Inputs:
  *   lr : Virtual address to return to.
  *
- * Clobbers x0 - x5
+ * Clobbers x0 - x6
  */
 ENTRY(enable_secondary_cpu_mm)
-        mov   x5, lr
+        mov   x6, lr
 
         load_paddr x0, init_ttbr
         ldr   x0, [x0]
 
+        mov   x1, #SCTLR_Axx_ELx_WXN        /* Enable WxN from the start */
         bl    enable_mmu
-        mov   lr, x5
+        mov   lr, x6
 
         /* Return to the virtual address requested by the caller. */
         ret
@@ -320,14 +323,15 @@  ENDPROC(enable_secondary_cpu_mm)
  * Inputs:
  *   lr : Virtual address to return to.
  *
- * Clobbers x0 - x5
+ * Clobbers x0 - x6
  */
 ENTRY(enable_boot_cpu_mm)
-        mov   x5, lr
+        mov   x6, lr
 
         bl    create_page_tables
         load_paddr x0, boot_pgtable
 
+        mov   x1, #0        /* No extra SCTLR flags */
         bl    enable_mmu
 
         /*
@@ -337,7 +341,7 @@  ENTRY(enable_boot_cpu_mm)
         ldr   x0, =1f
         br    x0
 1:
-        mov   lr, x5
+        mov   lr, x6
         /*
          * The 1:1 map may clash with other parts of the Xen virtual memory
          * layout. As it is not used anymore, remove it completely to
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index d25e59f828..163d22ecd3 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -214,8 +214,6 @@  extern void remove_early_mappings(void);
 /* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the
  * new page table */
 extern int init_secondary_pagetables(int cpu);
-/* Switch secondary CPUS to its own pagetables and finalise MMU setup */
-extern void mmu_init_secondary_cpu(void);
 /*
  * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
  * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index b7eb3a6e08..923a90925c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -326,12 +326,6 @@  void __init setup_pagetables(unsigned long boot_phys_offset)
 #endif
 }
 
-/* MMU setup for secondary CPUS (which already have paging enabled) */
-void mmu_init_secondary_cpu(void)
-{
-    xen_pt_enforce_wnx();
-}
-
 #ifdef CONFIG_ARM_32
 /*
  * Set up the direct-mapped xenheap:
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index ec76de3cac..beb137d06e 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -361,8 +361,6 @@  void start_secondary(void)
      */
     update_system_features(&current_cpu_data);
 
-    mmu_init_secondary_cpu();
-
     gic_init_secondary_cpu();
 
     set_current(idle_vcpu[cpuid]);