[v3,07/18] xen/arm32: head: Jump to the runtime mapping in enable_mmu()

Message ID: 20221212095523.52683-8-julien@xen.org
State: Superseded
Series: xen/arm: Don't switch TTBR while the MMU is on

Commit Message

Julien Grall Dec. 12, 2022, 9:55 a.m. UTC
From: Julien Grall <jgrall@amazon.com>

At the moment, enable_mmu() will return to an address in the 1:1 mapping
and each path is responsible for switching to the runtime mapping.

In a follow-up patch, the logic to switch to the runtime mapping will
become more complex. So, to avoid further code/comment duplication, move
the switch into enable_mmu().

Lastly, take the opportunity to replace the load from the literal pool
with mov_w.

Signed-off-by: Julien Grall <jgrall@amazon.com>

---
    Changes in v3:
        - Fix typo in the commit message

    Changes in v2:
        - Patch added
---
 xen/arch/arm/arm32/head.S | 50 +++++++++++++++++++++++----------------
 1 file changed, 30 insertions(+), 20 deletions(-)
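
For reference, mov_w is the assembler macro used in Xen's arm32 code to build a
32-bit immediate with a movw/movt pair, so the address ends up encoded in the
instruction stream rather than fetched from an assembler-emitted literal pool.
A rough sketch of the two idioms being swapped (the label name here is only
illustrative):

        /* Literal-pool form: 'ldr =' makes the assembler emit the constant */
        /* in a nearby pool and load it with a PC-relative ldr              */
        ldr   r0, =primary_switched

        /* mov_w form: expands to a movw/movt pair, so the address is       */
        /* encoded directly in the two instructions, with no data access    */
        mov_w r0, primary_switched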

Comments

Stefano Stabellini Dec. 13, 2022, 12:46 a.m. UTC | #1
On Mon, 12 Dec 2022, Julien Grall wrote:
> From: Julien Grall <jgrall@amazon.com>
> 
> At the moment, enable_mmu() will return to an address in the 1:1 mapping
> and each path is responsible for switching to the runtime mapping.
> 
> In a follow-up patch, the logic to switch to the runtime mapping will
> become more complex. So, to avoid further code/comment duplication, move
> the switch into enable_mmu().
> 
> Lastly, take the opportunity to replace the load from the literal pool
> with mov_w.
> 
> Signed-off-by: Julien Grall <jgrall@amazon.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

Patch

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index ce680be91be1..40c1d7502007 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -167,19 +167,11 @@  past_zImage:
         bl    check_cpu_mode
         bl    cpu_init
         bl    create_page_tables
-        bl    enable_mmu
 
-        /* We are still in the 1:1 mapping. Jump to the runtime Virtual Address. */
-        mov_w r0, primary_switched
-        mov   pc, r0
+        /* Address in the runtime mapping to jump to after the MMU is enabled */
+        mov_w lr, primary_switched
+        b     enable_mmu
 primary_switched:
-        /*
-         * The 1:1 map may clash with other parts of the Xen virtual memory
-         * layout. As it is not used anymore, remove it completely to
-         * avoid having to worry about replacing existing mapping
-         * afterwards.
-         */
-        bl    remove_identity_mapping
         bl    setup_fixmap
 #ifdef CONFIG_EARLY_PRINTK
         /* Use a virtual address to access the UART. */
@@ -223,12 +215,10 @@  GLOBAL(init_secondary)
         bl    check_cpu_mode
         bl    cpu_init
         bl    create_page_tables
-        bl    enable_mmu
 
-
-        /* We are still in the 1:1 mapping. Jump to the runtime Virtual Address. */
-        mov_w r0, secondary_switched
-        mov   pc, r0
+        /* Address in the runtime mapping to jump to after the MMU is enabled */
+        mov_w lr, secondary_switched
+        b     enable_mmu
 secondary_switched:
         /*
          * Non-boot CPUs need to move on to the proper pagetables, which were
@@ -523,9 +513,12 @@  virtphys_clash:
 ENDPROC(create_page_tables)
 
 /*
- * Turn on the Data Cache and the MMU. The function will return on the 1:1
- * mapping. In other word, the caller is responsible to switch to the runtime
- * mapping.
+ * Turn on the Data Cache and the MMU. The function will return
+ * to the virtual address provided in LR (e.g. the runtime mapping).
+ *
+ * Inputs:
+ *   r9 : paddr(start)
+ *   lr : Virtual address to return to
  *
  * Clobbers r0 - r3
  */
@@ -551,7 +544,24 @@  enable_mmu:
         dsb                          /* Flush PTE writes and finish reads */
         mcr   CP32(r0, HSCTLR)       /* now paging is enabled */
         isb                          /* Now, flush the icache */
-        mov   pc, lr
+
+        /*
+         * The MMU is turned on and we are in the 1:1 mapping. Switch
+         * to the runtime mapping.
+         */
+        mov_w r0, 1f
+        mov   pc, r0
+1:
+        /*
+         * The 1:1 map may clash with other parts of the Xen virtual memory
+         * layout. As it is not used anymore, remove it completely to
+         * avoid having to worry about replacing existing mapping
+         * afterwards.
+         *
+         * On return this will jump to the virtual address requested by
+         * the caller.
+         */
+        b     remove_identity_mapping
 ENDPROC(enable_mmu)
 
 /*
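
Taken together, the control flow on the primary CPU after this patch looks
roughly as follows (a condensed sketch stitched from the hunks above, with
unrelated instructions elided):

        /* Boot path: still running on the 1:1 mapping */
        mov_w lr, primary_switched      /* runtime VA enable_mmu will return to */
        b     enable_mmu

enable_mmu:
        ...                             /* turn on the D-cache and the MMU */
        isb
        mov_w r0, 1f                    /* jump to the runtime alias of this code */
        mov   pc, r0
1:
        b     remove_identity_mapping   /* tear down the 1:1 map; its return goes to lr */

primary_switched:
        bl    setup_fixmap              /* from here on only the runtime mapping is used */

The secondary CPU path is identical except that lr is loaded with
secondary_switched.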