[v3,24/28] xen/arm: Zero BSS after the MMU and D-cache are turned on

Message ID 20190812173019.11956-25-julien.grall@arm.com (mailing list archive)
State Superseded
Series xen/arm: Rework head.S to make it more compliant with the Arm Arm

Commit Message

Julien Grall Aug. 12, 2019, 5:30 p.m. UTC
At the moment, BSS is zeroed before the MMU and D-cache are turned on.
In other words, the cache is bypassed when zeroing the BSS section.

On Arm64, per the Image protocol [1], the state of the cache for the BSS
region is not known because BSS is not part of the "loaded kernel image".

On Arm32, the boot protocol [2] does not mention anything about the
state of the cache. Therefore, the state of the cache for the BSS region
should also be assumed to be unknown.

This means that the cache would need to be invalidated twice for the BSS
region (a rough sketch of the sequence follows the list):
    1) Before zeroing, to remove any dirty cache line. Otherwise a line
    may get evicted while zeroing and overwrite the freshly written
    zeroes.
    2) After zeroing, to remove any cache line that may have been
    speculatively allocated. Otherwise, when turning on the MMU and
    D-cache, the CPU may see stale values.

At the moment, the only reason to zero BSS early is that the boot page
tables are part of it. To avoid the two cache invalidations, it would be
better if the boot page tables were part of the "loaded kernel image" and
therefore zeroed when the image is loaded into memory. A good candidate
is the .data.page_aligned section (see the short illustration below).
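To illustrate (the names below are made up, not part of the patch): an
object explicitly placed in .data.page_aligned is emitted into the binary
and so arrives already zeroed as part of the "loaded kernel image",
whereas a plain uninitialised object ends up in .bss with no backing
bytes in the image:

    static char in_bss[PAGE_SIZE];              /* .bss: no bytes in the image */
    static char in_image[PAGE_SIZE] __aligned(PAGE_SIZE)
        __section(".data.page_aligned");        /* shipped, zeroed, with the image */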

A new macro, DEFINE_BOOT_PAGE_TABLE, is introduced to create and mark
page-tables used before BSS is zeroed. This covers all the boot_* tables
but also xen_fixmap, as zero_bss() will print a message when earlyprintk
is enabled. A hypothetical usage example follows below.
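Hypothetical usage (the table names are invented; the macros are the
ones added to xen/arch/arm/mm.c by this patch): a table touched from
head.S before zero_bss() runs needs the new macro, while a table only
used once the C world is up can stay in .bss:

    DEFINE_BOOT_PAGE_TABLE(early_example_table);  /* used before BSS is zeroed */
    static DEFINE_PAGE_TABLE(late_example_table); /* only used after start_xen() */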

[1] linux/Documentation/arm64/booting.txt
[2] linux/Documentation/arm/Booting

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

---
    Changes in v3:
        - Add Stefano's reviewed-by

    Changes in v2:
        - Add missing signed-off
        - Clarify commit message
        - Add arm32 parts
---
 xen/arch/arm/arm32/head.S | 11 +++--------
 xen/arch/arm/arm64/head.S |  7 +++----
 xen/arch/arm/mm.c         | 23 +++++++++++++++++------
 3 files changed, 23 insertions(+), 18 deletions(-)
Patch

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 2317fb8855..e86a9f95e7 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -153,7 +153,6 @@  past_zImage:
         mov   r12, #0                /* r12 := is_secondary_cpu */
 
         bl    check_cpu_mode
-        bl    zero_bss
         bl    cpu_init
         bl    create_page_tables
         bl    enable_mmu
@@ -174,6 +173,7 @@  primary_switched:
         /* Use a virtual address to access the UART. */
         mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
 #endif
+        bl    zero_bss
         PRINT("- Ready -\r\n")
         /* Setup the arguments for start_xen and jump to C world */
         mov   r0, r10                /* r0 := Physical offset */
@@ -284,17 +284,12 @@  ENDPROC(check_cpu_mode)
 /*
  * Zero BSS
  *
- * Inputs:
- *   r10: Physical offset
- *
  * Clobbers r0 - r3
  */
 zero_bss:
         PRINT("- Zero BSS -\r\n")
-        ldr   r0, =__bss_start       /* Load start & end of bss */
-        ldr   r1, =__bss_end
-        add   r0, r0, r10            /* Apply physical offset */
-        add   r1, r1, r10
+        ldr   r0, =__bss_start       /* r0 := vaddr(__bss_start) */
+        ldr   r1, =__bss_end         /* r1 := vaddr(__bss_end) */
 
         mov   r2, #0
 1:      str   r2, [r0], #4
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index a6f3aa4ee5..f2a0e1d3b0 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -309,7 +309,6 @@  real_start_efi:
         mov   x22, #0                /* x22 := is_secondary_cpu */
 
         bl    check_cpu_mode
-        bl    zero_bss
         bl    cpu_init
         bl    create_page_tables
         bl    enable_mmu
@@ -330,6 +329,7 @@  primary_switched:
         /* Use a virtual address to access the UART. */
         ldr   x23, =EARLY_UART_VIRTUAL_ADDRESS
 #endif
+        bl    zero_bss
         PRINT("- Ready -\r\n")
         /* Setup the arguments for start_xen and jump to C world */
         mov   x0, x20                /* x0 := Physical offset */
@@ -432,7 +432,6 @@  ENDPROC(check_cpu_mode)
  * Zero BSS
  *
  * Inputs:
- *   x20: Physical offset
  *   x26: Do we need to zero BSS?
  *
  * Clobbers x0 - x3
@@ -442,8 +441,8 @@  zero_bss:
         cbnz  x26, skip_bss
 
         PRINT("- Zero BSS -\r\n")
-        load_paddr x0, __bss_start    /* Load paddr of start & end of bss */
-        load_paddr x1, __bss_end
+        ldr   x0, =__bss_start       /* x0 := vaddr(__bss_start) */
+        ldr   x1, =__bss_end         /* x1 := vaddr(__bss_end) */
 
 1:      str   xzr, [x0], #8
         cmp   x0, x1
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index e1cdeaaf2f..65552da4ba 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -62,6 +62,17 @@  mm_printk(const char *fmt, ...) {}
     } while (0);
 #endif
 
+/*
+ * Macros to define page-tables:
+ *  - DEFINE_BOOT_PAGE_TABLE is used to define a page-table that is used
+ *  in assembly code before BSS is zeroed.
+ *  - DEFINE_PAGE_TABLE{,S} are used to define one or multiple
+ *  page-tables to be used after BSS is zeroed (typically they are only used
+ *  in C).
+ */
+#define DEFINE_BOOT_PAGE_TABLE(name)                                          \
+lpae_t __aligned(PAGE_SIZE) __section(".data.page_aligned") name[LPAE_ENTRIES]
+
 #define DEFINE_PAGE_TABLES(name, nr)                    \
 lpae_t __aligned(PAGE_SIZE) name[LPAE_ENTRIES * (nr)]
 
@@ -90,13 +101,13 @@  lpae_t __aligned(PAGE_SIZE) name[LPAE_ENTRIES * (nr)]
  * Finally, if EARLY_PRINTK is enabled then xen_fixmap will be mapped
  * by the CPU once it has moved off the 1:1 mapping.
  */
-DEFINE_PAGE_TABLE(boot_pgtable);
+DEFINE_BOOT_PAGE_TABLE(boot_pgtable);
 #ifdef CONFIG_ARM_64
-DEFINE_PAGE_TABLE(boot_first);
-DEFINE_PAGE_TABLE(boot_first_id);
+DEFINE_BOOT_PAGE_TABLE(boot_first);
+DEFINE_BOOT_PAGE_TABLE(boot_first_id);
 #endif
-DEFINE_PAGE_TABLE(boot_second);
-DEFINE_PAGE_TABLE(boot_third);
+DEFINE_BOOT_PAGE_TABLE(boot_second);
+DEFINE_BOOT_PAGE_TABLE(boot_third);
 
 /* Main runtime page tables */
 
@@ -149,7 +160,7 @@  static __initdata int xenheap_first_first_slot = -1;
  */
 static DEFINE_PAGE_TABLES(xen_second, 2);
 /* First level page table used for fixmap */
-DEFINE_PAGE_TABLE(xen_fixmap);
+DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
 /* First level page table used to map Xen itself with the XN bit set
  * as appropriate. */
 static DEFINE_PAGE_TABLE(xen_xenmap);