
[v4,09/14] xen/arm32: head: Remove restriction where to load Xen

Message ID 20230113101136.479-10-julien@xen.org (mailing list archive)
State Superseded
Series xen/arm: Don't switch TTBR while the MMU is on

Commit Message

Julien Grall Jan. 13, 2023, 10:11 a.m. UTC
From: Julien Grall <jgrall@amazon.com>

At the moment, bootloaders can load Xen anywhere in memory except for
the 2MB - 4MB region. While I am not aware of any issue, we have no
way to tell the bootloader to avoid that region.

In addition, Xen may grow beyond 2MB in the future if we enable
features like UBSAN or GCOV. To avoid widening the restriction on the
load address, it would be better to get rid of it.

When the identity mapping clashes with the Xen runtime mapping, we
need an extra indirection to be able to replace the identity mapping
with the Xen runtime mapping.

Reserve a new memory region that will be used to temporarily map Xen.
For convenience, the new area re-uses the same first slot as the
domheap, which is used for per-CPU temporary mappings after a CPU has
booted.

Furthermore, directly map boot_second (which covers Xen and more) to
the temporary area. This avoids allocating an extra page-table for the
second level and will be helpful for follow-up patches (we will want
to use the fixmap whilst in the temporary mapping).

Lastly, some parts of the code now need to know whether the temporary
mapping was created. So reserve r12 to store this information.

Signed-off-by: Julien Grall <jgrall@amazon.com>
----
    Changes in v4:
        - Remove spurious newline

    Changes in v3:
        - Remove the ASSERT() in init_domheap_mappings() because it was
          bogus (secondary CPU root tables are initialized to the CPU0
          root table so the entry will be valid). Also, it is not
          related to this patch as the CPU0 root table are rebuilt
          during boot. The ASSERT() will be re-introduced later.

    Changes in v2:
        - Patch added
---
 xen/arch/arm/arm32/head.S         | 139 ++++++++++++++++++++++++++----
 xen/arch/arm/include/asm/config.h |  14 +++
 xen/arch/arm/mm.c                 |  14 +++
 3 files changed, 152 insertions(+), 15 deletions(-)
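
For reference, a standalone C sketch of the address arithmetic this patch relies on. The macro shapes mirror the config.h hunk further down; the concrete values (XEN_VIRT_START at 2MB, DOMHEAP_VIRT_START at 2GB, 1GB first-level slots) are illustrative assumptions, not values copied from Xen's headers.

#include <stdint.h>
#include <stdio.h>

#define MB(x)  ((uint32_t)(x) << 20)
#define GB(x)  ((uint32_t)(x) << 30)

/* Assumed arm32 LPAE first-level geometry: 1GB per slot */
#define XEN_PT_LEVEL_SHIFT_1    30
#define XEN_PT_LEVEL_MASK_1     (~(GB(1) - 1))

/* Assumed virtual layout, for illustration only */
#define XEN_VIRT_START          MB(2)
#define DOMHEAP_VIRT_START      GB(2)

#define first_table_offset(va) ((va) >> XEN_PT_LEVEL_SHIFT_1)

/* Same shape as the macros added to config.h by this patch */
#define TEMPORARY_AREA_FIRST_SLOT  first_table_offset(DOMHEAP_VIRT_START)
#define TEMPORARY_AREA_ADDR(addr)                                   \
    (((addr) & ~XEN_PT_LEVEL_MASK_1) |                              \
     ((uint32_t)TEMPORARY_AREA_FIRST_SLOT << XEN_PT_LEVEL_SHIFT_1))
#define TEMPORARY_XEN_VIRT_START   TEMPORARY_AREA_ADDR(XEN_VIRT_START)

int main(void)
{
    /* Offset applied by switch_to_runtime_mapping while still in the 1:1 map */
    uint32_t xen_temporary_offset = TEMPORARY_XEN_VIRT_START - XEN_VIRT_START;

    printf("TEMPORARY_AREA_FIRST_SLOT = %u\n",
           (unsigned)TEMPORARY_AREA_FIRST_SLOT);
    printf("TEMPORARY_XEN_VIRT_START  = %#x\n",
           (unsigned)TEMPORARY_XEN_VIRT_START);
    printf("XEN_TEMPORARY_OFFSET      = %#x\n", (unsigned)xen_temporary_offset);
    return 0;
}

With these assumed values the temporary alias of Xen lands at 0x80200000, so XEN_TEMPORARY_OFFSET is 0x80000000, which is the offset switch_to_runtime_mapping adds before jumping into the temporary mapping.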

Comments

Luca Fancellu Jan. 13, 2023, 2:58 p.m. UTC | #1
> On 13 Jan 2023, at 10:11, Julien Grall <julien@xen.org> wrote:
> 
> From: Julien Grall <jgrall@amazon.com>
> 
> At the moment, bootloaders can load Xen anywhere in memory except for
> the 2MB - 4MB region. While I am not aware of any issue, we have no
> way to tell the bootloader to avoid that region.
> 
> In addition, Xen may grow beyond 2MB in the future if we enable
> features like UBSAN or GCOV. To avoid widening the restriction on the
> load address, it would be better to get rid of it.
> 
> When the identity mapping clashes with the Xen runtime mapping, we
> need an extra indirection to be able to replace the identity mapping
> with the Xen runtime mapping.
> 
> Reserve a new memory region that will be used to temporarily map Xen.
> For convenience, the new area re-uses the same first slot as the
> domheap, which is used for per-CPU temporary mappings after a CPU has
> booted.
> 
> Furthermore, directly map boot_second (which covers Xen and more) to
> the temporary area. This avoids allocating an extra page-table for the
> second level and will be helpful for follow-up patches (we will want
> to use the fixmap whilst in the temporary mapping).
> 
> Lastly, some parts of the code now need to know whether the temporary
> mapping was created. So reserve r12 to store this information.
> 
> Signed-off-by: Julien Grall <jgrall@amazon.com>
> ----
> 

Hi Julien,

> 
> +/*
> + * Remove the temporary mapping of Xen starting at TEMPORARY_XEN_VIRT_START.
> + *
> + * Clobbers r0 - r1

NIT: r0 - r3?

> + */
> +remove_temporary_mapping:
> +        /* r2:r3 := invalid page-table entry */
> +        mov   r2, #0
> +        mov   r3, #0
> +
> +        adr_l r0, boot_pgtable
> +        mov_w r1, TEMPORARY_XEN_VIRT_START
> +        get_table_slot r1, r1, 1     /* r1 := first slot */
> +        lsl   r1, r1, #3             /* r1 := first slot offset */
> +        strd  r2, r3, [r0, r1]
> +
> +        flush_xen_tlb_local r0
> +
> +        mov  pc, lr
> +ENDPROC(remove_temporary_mapping)
> +

The rest looks good to me. I’ve also built for arm64/32 and tested this patch on FVP in AArch32 mode,
booting Dom0 and creating/running/destroying some guests.

Reviewed-by: Luca Fancellu <luca.fancellu@arm.com>
Tested-by: Luca Fancellu <luca.fancellu@arm.com>

Cheers,
Luca
Michal Orzel Jan. 16, 2023, 8:14 a.m. UTC | #2
Hi Julien,

On 13/01/2023 11:11, Julien Grall wrote:
> 
> 
> From: Julien Grall <jgrall@amazon.com>
> 
> At the moment, bootloaders can load Xen anywhere in memory except for
> the 2MB - 4MB region. While I am not aware of any issue, we have no
> way to tell the bootloader to avoid that region.
> 
> In addition, Xen may grow beyond 2MB in the future if we enable
> features like UBSAN or GCOV. To avoid widening the restriction on the
> load address, it would be better to get rid of it.
> 
> When the identity mapping clashes with the Xen runtime mapping, we
> need an extra indirection to be able to replace the identity mapping
> with the Xen runtime mapping.
> 
> Reserve a new memory region that will be used to temporarily map Xen.
> For convenience, the new area re-uses the same first slot as the
> domheap, which is used for per-CPU temporary mappings after a CPU has
> booted.
> 
> Furthermore, directly map boot_second (which covers Xen and more) to
> the temporary area. This avoids allocating an extra page-table for the
> second level and will be helpful for follow-up patches (we will want
> to use the fixmap whilst in the temporary mapping).
> 
> Lastly, some parts of the code now need to know whether the temporary
> mapping was created. So reserve r12 to store this information.
> 
> Signed-off-by: Julien Grall <jgrall@amazon.com>
> ----
>     Changes in v4:
>         - Remove spurious newline
> 
>     Changes in v3:
>         - Remove the ASSERT() in init_domheap_mappings() because it was
>           bogus (secondary CPU root tables are initialized to the CPU0
>           root table so the entry will be valid). Also, it is not
>           related to this patch as the CPU0 root table are rebuilt
>           during boot. The ASSERT() will be re-introduced later.
> 
>     Changes in v2:
>         - Patch added
> ---
>  xen/arch/arm/arm32/head.S         | 139 ++++++++++++++++++++++++++----
>  xen/arch/arm/include/asm/config.h |  14 +++
>  xen/arch/arm/mm.c                 |  14 +++
>  3 files changed, 152 insertions(+), 15 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 67b910808b74..3800efb44169 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -35,6 +35,9 @@
>  #define XEN_FIRST_SLOT      first_table_offset(XEN_VIRT_START)
>  #define XEN_SECOND_SLOT     second_table_offset(XEN_VIRT_START)
> 
> +/* Offset between the early boot xen mapping and the runtime xen mapping */
> +#define XEN_TEMPORARY_OFFSET      (TEMPORARY_XEN_VIRT_START - XEN_VIRT_START)
> +
>  #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_EARLY_PRINTK_INC)
>  #include CONFIG_EARLY_PRINTK_INC
>  #endif
> @@ -94,7 +97,7 @@
>   *   r9  - paddr(start)
>   *   r10 - phys offset
>   *   r11 - UART address
> - *   r12 -
> + *   r12 - Temporary mapping created
>   *   r13 - SP
>   *   r14 - LR
>   *   r15 - PC
> @@ -445,6 +448,9 @@ ENDPROC(cpu_init)
>   *   r9 : paddr(start)
>   *   r10: phys offset
>   *
> + * Output:
> + *   r12: Was a temporary mapping created?
> + *
>   * Clobbers r0 - r4, r6
>   *
>   * Register usage within this function:
> @@ -484,7 +490,11 @@ create_page_tables:
>          /*
>           * Setup the 1:1 mapping so we can turn the MMU on. Note that
>           * only the first page of Xen will be part of the 1:1 mapping.
> +         *
> +         * In all the cases, we will link boot_third_id. So create the
> +         * mapping in advance.
>           */
> +        create_mapping_entry boot_third_id, r9, r9
> 
>          /*
>           * Find the first slot used. If the slot is not XEN_FIRST_SLOT,
> @@ -501,8 +511,7 @@ create_page_tables:
>          /*
>           * Find the second slot used. If the slot is XEN_SECOND_SLOT, then the
>           * 1:1 mapping will use its own set of page-tables from the
> -         * third level. For slot XEN_SECOND_SLOT, Xen is not yet able to handle
> -         * it.
> +         * third level.
>           */
>          get_table_slot r1, r9, 2     /* r1 := second slot */
>          cmp   r1, #XEN_SECOND_SLOT
> @@ -513,13 +522,33 @@ create_page_tables:
>  link_from_second_id:
>          create_table_entry boot_second_id, boot_third_id, r9, 2
>  link_from_third_id:
> -        create_mapping_entry boot_third_id, r9, r9
> +        /* Good news, we are not clashing with Xen virtual mapping */
> +        mov   r12, #0                /* r12 := temporary mapping not created */
>          mov   pc, lr
> 
>  virtphys_clash:
> -        /* Identity map clashes with boot_third, which we cannot handle yet */
> -        PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
> -        b     fail
> +        /*
> +         * The identity map clashes with boot_third. Link boot_first_id and
> +         * map Xen to a temporary mapping. See switch_to_runtime_mapping
> +         * for more details.
> +         */
> +        PRINT("- Virt and Phys addresses clash  -\r\n")
> +        PRINT("- Create temporary mapping -\r\n")
> +
> +        /*
> +         * This will override the link to boot_second in XEN_FIRST_SLOT.
> +         * The page-tables are not live yet. So no need to use
> +         * break-before-make.
> +         */
> +        create_table_entry boot_pgtable, boot_second_id, r9, 1
> +        create_table_entry boot_second_id, boot_third_id, r9, 2
> +
> +        /* Map boot_second (cover Xen mappings) to the temporary 1st slot */
> +        mov_w r0, TEMPORARY_XEN_VIRT_START
> +        create_table_entry boot_pgtable, boot_second, r0, 1
> +
> +        mov   r12, #1                /* r12 := temporary mapping created */
> +        mov   pc, lr
>  ENDPROC(create_page_tables)
> 
>  /*
> @@ -528,9 +557,10 @@ ENDPROC(create_page_tables)
>   *
>   * Inputs:
>   *   r9 : paddr(start)
> + *  r12 : Was the temporary mapping created?
>   *   lr : Virtual address to return to
>   *
> - * Clobbers r0 - r3
> + * Clobbers r0 - r5
>   */
>  enable_mmu:
>          PRINT("- Turning on paging -\r\n")
> @@ -558,21 +588,79 @@ enable_mmu:
>           * The MMU is turned on and we are in the 1:1 mapping. Switch
>           * to the runtime mapping.
>           */
> -        mov_w r0, 1f
> -        mov   pc, r0
> +        mov   r5, lr                /* Save LR before overwriting it */
> +        mov_w lr, 1f                /* Virtual address in the runtime mapping */
> +        b     switch_to_runtime_mapping
>  1:
> +        mov   lr, r5                /* Restore LR */
>          /*
> -         * The 1:1 map may clash with other parts of the Xen virtual memory
> -         * layout. As it is not used anymore, remove it completely to
> -         * avoid having to worry about replacing existing mapping
> -         * afterwards.
> +         * At this point, either the 1:1 map or the temporary mapping
> +         * will be present. The former may clash with other parts of the
> +         * Xen virtual memory layout. As both of them are not used
> +         * anymore, remove them completely to avoid having to worry
> +         * about replacing existing mapping afterwards.
>           *
>           * On return this will jump to the virtual address requested by
>           * the caller.
>           */
> -        b     remove_identity_mapping
> +        teq   r12, #0
> +        beq   remove_identity_mapping
> +        b     remove_temporary_mapping
>  ENDPROC(enable_mmu)
> 
> +/*
> + * Switch to the runtime mapping. The logic depends on whether the
> + * runtime virtual region is clashing with the physical address
> + *
> + *  - If it is not clashing, we can directly jump to the address in
> + *    the runtime mapping.
> + *  - If it is clashing, create_page_tables() would have mapped Xen to
> + *    a temporary virtual address. We need to switch to the temporary
> + *    mapping so we can remove the identity mapping and map Xen at the
> + *    correct position.
> + *
> + * Inputs
> + *    r9: paddr(start)
> + *   r12: Was a temporary mapping created?
> + *    lr: Address in the runtime mapping to jump to
> + *
> + * Clobbers r0 - r4
> + */
> +switch_to_runtime_mapping:
> +        /*
> +         * Jump to the runtime mapping if the virt and phys are not
> +         * clashing
> +         */
> +        teq   r12, #0
> +        beq   ready_to_switch
> +
> +        /* We are still in the 1:1 mapping. Jump to the temporary Virtual address. */
> +        mov_w r0, 1f
> +        add   r0, r0, #XEN_TEMPORARY_OFFSET /* r0 := address in temporary mapping */
> +        mov   pc, r0
> +
> +1:
> +        /* Remove boot_second_id */
> +        mov   r2, #0
> +        mov   r3, #0
> +        adr_l r0, boot_pgtable
> +        get_table_slot r1, r9, 1            /* r1 := first slot */
> +        lsl   r1, r1, #3                    /* r1 := first slot offset */
> +        strd  r2, r3, [r0, r1]
> +
> +        flush_xen_tlb_local r0
> +
> +        /* Map boot_second into boot_pgtable */
> +        mov_w r0, XEN_VIRT_START
> +        create_table_entry boot_pgtable, boot_second, r0, 1
> +
> +        /* Ensure any page table updates are visible before continuing */
> +        dsb   nsh
> +
> +ready_to_switch:
> +        mov   pc, lr
> +ENDPROC(switch_to_runtime_mapping)
> +
>  /*
>   * Remove the 1:1 map from the page-tables. It is not easy to keep track
>   * where the 1:1 map was mapped, so we will look for the top-level entry
> @@ -618,6 +706,27 @@ identity_mapping_removed:
>          mov   pc, lr
>  ENDPROC(remove_identity_mapping)
> 
> +/*
> + * Remove the temporary mapping of Xen starting at TEMPORARY_XEN_VIRT_START.
> + *
> + * Clobbers r0 - r1
> + */
> +remove_temporary_mapping:
> +        /* r2:r3 := invalid page-table entry */
> +        mov   r2, #0
> +        mov   r3, #0
> +
> +        adr_l r0, boot_pgtable
> +        mov_w r1, TEMPORARY_XEN_VIRT_START
> +        get_table_slot r1, r1, 1     /* r1 := first slot */
Can't we just use TEMPORARY_AREA_FIRST_SLOT?

> +        lsl   r1, r1, #3             /* r1 := first slot offset */
> +        strd  r2, r3, [r0, r1]
> +
> +        flush_xen_tlb_local r0
> +
> +        mov  pc, lr
> +ENDPROC(remove_temporary_mapping)
> +
>  /*
>   * Map the UART in the fixmap (when earlyprintk is used) and hook the
>   * fixmap table in the page tables.
> diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/config.h
> index 87851e677701..6c1b762e976d 100644
> --- a/xen/arch/arm/include/asm/config.h
> +++ b/xen/arch/arm/include/asm/config.h
> @@ -148,6 +148,20 @@
>  /* Number of domheap pagetable pages required at the second level (2MB mappings) */
>  #define DOMHEAP_SECOND_PAGES (DOMHEAP_VIRT_SIZE >> FIRST_SHIFT)
> 
> +/*
> + * The temporary area is overlapping with the domheap area. This may
> + * be used to create an alias of the first slot containing Xen mappings
> + * when turning on/off the MMU.
> + */
> +#define TEMPORARY_AREA_FIRST_SLOT    (first_table_offset(DOMHEAP_VIRT_START))
> +
> +/* Calculate the address in the temporary area */
> +#define TEMPORARY_AREA_ADDR(addr)                           \
> +     (((addr) & ~XEN_PT_LEVEL_MASK(1)) |                    \
> +      (TEMPORARY_AREA_FIRST_SLOT << XEN_PT_LEVEL_SHIFT(1)))
XEN_PT_LEVEL_{MASK/SHIFT} should be used when we do not know the level upfront.
Otherwise, no need for opencoding and you should use FIRST_MASK and FIRST_SHIFT.

~Michal
Julien Grall Jan. 16, 2023, 8:43 a.m. UTC | #3
Hi Luca,

On 13/01/2023 14:58, Luca Fancellu wrote:
>> +/*
>> + * Remove the temporary mapping of Xen starting at TEMPORARY_XEN_VIRT_START.
>> + *
>> + * Clobbers r0 - r1
> 
> NIT: r0 - r3?

Yes. I have updated the version in my tree.

> 
>> + */
>> +remove_temporary_mapping:
>> +        /* r2:r3 := invalid page-table entry */
>> +        mov   r2, #0
>> +        mov   r3, #0
>> +
>> +        adr_l r0, boot_pgtable
>> +        mov_w r1, TEMPORARY_XEN_VIRT_START
>> +        get_table_slot r1, r1, 1     /* r1 := first slot */
>> +        lsl   r1, r1, #3             /* r1 := first slot offset */
>> +        strd  r2, r3, [r0, r1]
>> +
>> +        flush_xen_tlb_local r0
>> +
>> +        mov  pc, lr
>> +ENDPROC(remove_temporary_mapping)
>> +
> 
> The rest looks good to me. I’ve also built for arm64/32 and tested this patch on FVP in AArch32 mode,
> booting Dom0 and creating/running/destroying some guests.
> 
> Reviewed-by: Luca Fancellu <luca.fancellu@arm.com>
> Tested-by: Luca Fancellu <luca.fancellu@arm.com>

Thanks!

Cheers,
Julien
Julien Grall Jan. 16, 2023, 8:55 a.m. UTC | #4
On 16/01/2023 08:14, Michal Orzel wrote:
> Hi Julien,

Hi Michal,

> On 13/01/2023 11:11, Julien Grall wrote:
>> +/*
>> + * Remove the temporary mapping of Xen starting at TEMPORARY_XEN_VIRT_START.
>> + *
>> + * Clobbers r0 - r1
>> + */
>> +remove_temporary_mapping:
>> +        /* r2:r3 := invalid page-table entry */
>> +        mov   r2, #0
>> +        mov   r3, #0
>> +
>> +        adr_l r0, boot_pgtable
>> +        mov_w r1, TEMPORARY_XEN_VIRT_START
>> +        get_table_slot r1, r1, 1     /* r1 := first slot */
> Can't we just use TEMPORARY_AREA_FIRST_SLOT?

IMHO, it would make the code a bit more difficult to read because the 
connection between TEMPORARY_XEN_VIRT_START and 
TEMPORARY_AREA_FIRST_SLOT is not totally obvious.

So I would rather prefer if this stays like that.
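
For readers following the assembly, a minimal C sketch of what the get_table_slot / lsl #3 / strd sequence works out to at the first level; the 1GB slot size, 8-byte LPAE entries and the 0x80200000 temporary base are assumptions used only for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Illustrative temporary base, see the sketch near the top of this page */
    uint32_t temporary_xen_virt_start = 0x80200000u;

    uint32_t slot        = temporary_xen_virt_start >> 30; /* get_table_slot r1, r1, 1 */
    uint32_t byte_offset = slot << 3;                       /* lsl r1, r1, #3 (8-byte entries) */

    /* strd r2, r3, [r0, r1] then stores a 64-bit zero, i.e. an invalid entry,
     * at boot_pgtable + byte_offset, clearing that first-level slot. */
    printf("first slot = %u, byte offset into boot_pgtable = %u\n",
           (unsigned)slot, (unsigned)byte_offset);
    return 0;
}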

> 
>> +        lsl   r1, r1, #3             /* r1 := first slot offset */
>> +        strd  r2, r3, [r0, r1]
>> +
>> +        flush_xen_tlb_local r0
>> +
>> +        mov  pc, lr
>> +ENDPROC(remove_temporary_mapping)
>> +
>>   /*
>>    * Map the UART in the fixmap (when earlyprintk is used) and hook the
>>    * fixmap table in the page tables.
>> diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/config.h
>> index 87851e677701..6c1b762e976d 100644
>> --- a/xen/arch/arm/include/asm/config.h
>> +++ b/xen/arch/arm/include/asm/config.h
>> @@ -148,6 +148,20 @@
>>   /* Number of domheap pagetable pages required at the second level (2MB mappings) */
>>   #define DOMHEAP_SECOND_PAGES (DOMHEAP_VIRT_SIZE >> FIRST_SHIFT)
>>
>> +/*
>> + * The temporary area is overlapping with the domheap area. This may
>> + * be used to create an alias of the first slot containing Xen mappings
>> + * when turning on/off the MMU.
>> + */
>> +#define TEMPORARY_AREA_FIRST_SLOT    (first_table_offset(DOMHEAP_VIRT_START))
>> +
>> +/* Calculate the address in the temporary area */
>> +#define TEMPORARY_AREA_ADDR(addr)                           \
>> +     (((addr) & ~XEN_PT_LEVEL_MASK(1)) |                    \
>> +      (TEMPORARY_AREA_FIRST_SLOT << XEN_PT_LEVEL_SHIFT(1)))
> XEN_PT_LEVEL_{MASK/SHIFT} should be used when we do not know the level upfront.
> Otherwise, no need for opencoding and you should use FIRST_MASK and FIRST_SHIFT.

We discussed in the past phasing out the use of FIRST_MASK and FIRST_SHIFT 
because the names are too generic. So for new code, we should use 
XEN_PT_LEVEL_{MASK/SHIFT}.
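
As a side note, a small compile-time sketch of the naming point above; the 9-bits-per-level, 4KB-page LPAE constants are assumptions here, not copied from Xen's page.h.

#include <stdint.h>

/* Level-parameterised spellings (preferred for new code) */
#define LPAE_SHIFT              9
#define PAGE_SHIFT              12
#define XEN_PT_LEVEL_SHIFT(lvl) (PAGE_SHIFT + (3 - (lvl)) * LPAE_SHIFT)
#define XEN_PT_LEVEL_SIZE(lvl)  ((uint64_t)1 << XEN_PT_LEVEL_SHIFT(lvl))
#define XEN_PT_LEVEL_MASK(lvl)  (~(XEN_PT_LEVEL_SIZE(lvl) - 1))

/* Legacy level-specific spelling, assumed here to denote the same quantity */
#define FIRST_SHIFT             30

/* At level 1 both spellings describe the same 1GB granularity; the
 * parameterised form is preferred because it names the level explicitly,
 * whereas FIRST_SHIFT/FIRST_MASK are considered too generic. */
_Static_assert(XEN_PT_LEVEL_SHIFT(1) == FIRST_SHIFT,
               "level-1 shift matches the legacy FIRST_SHIFT");
_Static_assert(XEN_PT_LEVEL_SIZE(1) == (1ULL << 30),
               "a first-level slot covers 1GB");

int main(void) { return 0; }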

Cheers,
Julien
Michal Orzel Jan. 16, 2023, 9:32 a.m. UTC | #5
On 16/01/2023 09:55, Julien Grall wrote:
> 
> 
> On 16/01/2023 08:14, Michal Orzel wrote:
>> Hi Julien,
> 
> Hi Michal,
> 
>> On 13/01/2023 11:11, Julien Grall wrote:
>>> +/*
>>> + * Remove the temporary mapping of Xen starting at TEMPORARY_XEN_VIRT_START.
>>> + *
>>> + * Clobbers r0 - r1
>>> + */
>>> +remove_temporary_mapping:
>>> +        /* r2:r3 := invalid page-table entry */
>>> +        mov   r2, #0
>>> +        mov   r3, #0
>>> +
>>> +        adr_l r0, boot_pgtable
>>> +        mov_w r1, TEMPORARY_XEN_VIRT_START
>>> +        get_table_slot r1, r1, 1     /* r1 := first slot */
>> Can't we just use TEMPORARY_AREA_FIRST_SLOT?
> 
> IMHO, it would make the code a bit more difficult to read because the
> connection between TEMPORARY_XEN_VIRT_START and
> TEMPORARY_AREA_FIRST_SLOT is not totally obvious.
> 
> So I would rather prefer if this stays like that.
> 
>>
>>> +        lsl   r1, r1, #3             /* r1 := first slot offset */
>>> +        strd  r2, r3, [r0, r1]
>>> +
>>> +        flush_xen_tlb_local r0
>>> +
>>> +        mov  pc, lr
>>> +ENDPROC(remove_temporary_mapping)
>>> +
>>>   /*
>>>    * Map the UART in the fixmap (when earlyprintk is used) and hook the
>>>    * fixmap table in the page tables.
>>> diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/config.h
>>> index 87851e677701..6c1b762e976d 100644
>>> --- a/xen/arch/arm/include/asm/config.h
>>> +++ b/xen/arch/arm/include/asm/config.h
>>> @@ -148,6 +148,20 @@
>>>   /* Number of domheap pagetable pages required at the second level (2MB mappings) */
>>>   #define DOMHEAP_SECOND_PAGES (DOMHEAP_VIRT_SIZE >> FIRST_SHIFT)
>>>
>>> +/*
>>> + * The temporary area is overlapping with the domheap area. This may
>>> + * be used to create an alias of the first slot containing Xen mappings
>>> + * when turning on/off the MMU.
>>> + */
>>> +#define TEMPORARY_AREA_FIRST_SLOT    (first_table_offset(DOMHEAP_VIRT_START))
>>> +
>>> +/* Calculate the address in the temporary area */
>>> +#define TEMPORARY_AREA_ADDR(addr)                           \
>>> +     (((addr) & ~XEN_PT_LEVEL_MASK(1)) |                    \
>>> +      (TEMPORARY_AREA_FIRST_SLOT << XEN_PT_LEVEL_SHIFT(1)))
>> XEN_PT_LEVEL_{MASK/SHIFT} should be used when we do not know the level upfront.
>> Otherwise, no need for opencoding and you should use FIRST_MASK and FIRST_SHIFT.
> 
> We discussed in the past phasing out the use of FIRST_MASK and FIRST_SHIFT
> because the names are too generic. So for new code, we should use
> XEN_PT_LEVEL_{MASK/SHIFT}.
In that case:
Reviewed-by: Michal Orzel <michal.orzel@amd.com>

~Michal

Patch

diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index 67b910808b74..3800efb44169 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -35,6 +35,9 @@ 
 #define XEN_FIRST_SLOT      first_table_offset(XEN_VIRT_START)
 #define XEN_SECOND_SLOT     second_table_offset(XEN_VIRT_START)
 
+/* Offset between the early boot xen mapping and the runtime xen mapping */
+#define XEN_TEMPORARY_OFFSET      (TEMPORARY_XEN_VIRT_START - XEN_VIRT_START)
+
 #if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_EARLY_PRINTK_INC)
 #include CONFIG_EARLY_PRINTK_INC
 #endif
@@ -94,7 +97,7 @@ 
  *   r9  - paddr(start)
  *   r10 - phys offset
  *   r11 - UART address
- *   r12 -
+ *   r12 - Temporary mapping created
  *   r13 - SP
  *   r14 - LR
  *   r15 - PC
@@ -445,6 +448,9 @@  ENDPROC(cpu_init)
  *   r9 : paddr(start)
  *   r10: phys offset
  *
+ * Output:
+ *   r12: Was a temporary mapping created?
+ *
  * Clobbers r0 - r4, r6
  *
  * Register usage within this function:
@@ -484,7 +490,11 @@  create_page_tables:
         /*
          * Setup the 1:1 mapping so we can turn the MMU on. Note that
          * only the first page of Xen will be part of the 1:1 mapping.
+         *
+         * In all the cases, we will link boot_third_id. So create the
+         * mapping in advance.
          */
+        create_mapping_entry boot_third_id, r9, r9
 
         /*
          * Find the first slot used. If the slot is not XEN_FIRST_SLOT,
@@ -501,8 +511,7 @@  create_page_tables:
         /*
          * Find the second slot used. If the slot is XEN_SECOND_SLOT, then the
          * 1:1 mapping will use its own set of page-tables from the
-         * third level. For slot XEN_SECOND_SLOT, Xen is not yet able to handle
-         * it.
+         * third level.
          */
         get_table_slot r1, r9, 2     /* r1 := second slot */
         cmp   r1, #XEN_SECOND_SLOT
@@ -513,13 +522,33 @@  create_page_tables:
 link_from_second_id:
         create_table_entry boot_second_id, boot_third_id, r9, 2
 link_from_third_id:
-        create_mapping_entry boot_third_id, r9, r9
+        /* Good news, we are not clashing with Xen virtual mapping */
+        mov   r12, #0                /* r12 := temporary mapping not created */
         mov   pc, lr
 
 virtphys_clash:
-        /* Identity map clashes with boot_third, which we cannot handle yet */
-        PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
-        b     fail
+        /*
+         * The identity map clashes with boot_third. Link boot_first_id and
+         * map Xen to a temporary mapping. See switch_to_runtime_mapping
+         * for more details.
+         */
+        PRINT("- Virt and Phys addresses clash  -\r\n")
+        PRINT("- Create temporary mapping -\r\n")
+
+        /*
+         * This will override the link to boot_second in XEN_FIRST_SLOT.
+         * The page-tables are not live yet. So no need to use
+         * break-before-make.
+         */
+        create_table_entry boot_pgtable, boot_second_id, r9, 1
+        create_table_entry boot_second_id, boot_third_id, r9, 2
+
+        /* Map boot_second (cover Xen mappings) to the temporary 1st slot */
+        mov_w r0, TEMPORARY_XEN_VIRT_START
+        create_table_entry boot_pgtable, boot_second, r0, 1
+
+        mov   r12, #1                /* r12 := temporary mapping created */
+        mov   pc, lr
 ENDPROC(create_page_tables)
 
 /*
@@ -528,9 +557,10 @@  ENDPROC(create_page_tables)
  *
  * Inputs:
  *   r9 : paddr(start)
+ *  r12 : Was the temporary mapping created?
  *   lr : Virtual address to return to
  *
- * Clobbers r0 - r3
+ * Clobbers r0 - r5
  */
 enable_mmu:
         PRINT("- Turning on paging -\r\n")
@@ -558,21 +588,79 @@  enable_mmu:
          * The MMU is turned on and we are in the 1:1 mapping. Switch
          * to the runtime mapping.
          */
-        mov_w r0, 1f
-        mov   pc, r0
+        mov   r5, lr                /* Save LR before overwriting it */
+        mov_w lr, 1f                /* Virtual address in the runtime mapping */
+        b     switch_to_runtime_mapping
 1:
+        mov   lr, r5                /* Restore LR */
         /*
-         * The 1:1 map may clash with other parts of the Xen virtual memory
-         * layout. As it is not used anymore, remove it completely to
-         * avoid having to worry about replacing existing mapping
-         * afterwards.
+         * At this point, either the 1:1 map or the temporary mapping
+         * will be present. The former may clash with other parts of the
+         * Xen virtual memory layout. As both of them are not used
+         * anymore, remove them completely to avoid having to worry
+         * about replacing existing mapping afterwards.
          *
          * On return this will jump to the virtual address requested by
          * the caller.
          */
-        b     remove_identity_mapping
+        teq   r12, #0
+        beq   remove_identity_mapping
+        b     remove_temporary_mapping
 ENDPROC(enable_mmu)
 
+/*
+ * Switch to the runtime mapping. The logic depends on whether the
+ * runtime virtual region is clashing with the physical address
+ *
+ *  - If it is not clashing, we can directly jump to the address in
+ *    the runtime mapping.
+ *  - If it is clashing, create_page_tables() would have mapped Xen to
+ *    a temporary virtual address. We need to switch to the temporary
+ *    mapping so we can remove the identity mapping and map Xen at the
+ *    correct position.
+ *
+ * Inputs
+ *    r9: paddr(start)
+ *   r12: Was a temporary mapping created?
+ *    lr: Address in the runtime mapping to jump to
+ *
+ * Clobbers r0 - r4
+ */
+switch_to_runtime_mapping:
+        /*
+         * Jump to the runtime mapping if the virt and phys are not
+         * clashing
+         */
+        teq   r12, #0
+        beq   ready_to_switch
+
+        /* We are still in the 1:1 mapping. Jump to the temporary Virtual address. */
+        mov_w r0, 1f
+        add   r0, r0, #XEN_TEMPORARY_OFFSET /* r0 := address in temporary mapping */
+        mov   pc, r0
+
+1:
+        /* Remove boot_second_id */
+        mov   r2, #0
+        mov   r3, #0
+        adr_l r0, boot_pgtable
+        get_table_slot r1, r9, 1            /* r1 := first slot */
+        lsl   r1, r1, #3                    /* r1 := first slot offset */
+        strd  r2, r3, [r0, r1]
+
+        flush_xen_tlb_local r0
+
+        /* Map boot_second into boot_pgtable */
+        mov_w r0, XEN_VIRT_START
+        create_table_entry boot_pgtable, boot_second, r0, 1
+
+        /* Ensure any page table updates are visible before continuing */
+        dsb   nsh
+
+ready_to_switch:
+        mov   pc, lr
+ENDPROC(switch_to_runtime_mapping)
+
 /*
  * Remove the 1:1 map from the page-tables. It is not easy to keep track
  * where the 1:1 map was mapped, so we will look for the top-level entry
@@ -618,6 +706,27 @@  identity_mapping_removed:
         mov   pc, lr
 ENDPROC(remove_identity_mapping)
 
+/*
+ * Remove the temporary mapping of Xen starting at TEMPORARY_XEN_VIRT_START.
+ *
+ * Clobbers r0 - r1
+ */
+remove_temporary_mapping:
+        /* r2:r3 := invalid page-table entry */
+        mov   r2, #0
+        mov   r3, #0
+
+        adr_l r0, boot_pgtable
+        mov_w r1, TEMPORARY_XEN_VIRT_START
+        get_table_slot r1, r1, 1     /* r1 := first slot */
+        lsl   r1, r1, #3             /* r1 := first slot offset */
+        strd  r2, r3, [r0, r1]
+
+        flush_xen_tlb_local r0
+
+        mov  pc, lr
+ENDPROC(remove_temporary_mapping)
+
 /*
  * Map the UART in the fixmap (when earlyprintk is used) and hook the
  * fixmap table in the page tables.
diff --git a/xen/arch/arm/include/asm/config.h b/xen/arch/arm/include/asm/config.h
index 87851e677701..6c1b762e976d 100644
--- a/xen/arch/arm/include/asm/config.h
+++ b/xen/arch/arm/include/asm/config.h
@@ -148,6 +148,20 @@ 
 /* Number of domheap pagetable pages required at the second level (2MB mappings) */
 #define DOMHEAP_SECOND_PAGES (DOMHEAP_VIRT_SIZE >> FIRST_SHIFT)
 
+/*
+ * The temporary area is overlapping with the domheap area. This may
+ * be used to create an alias of the first slot containing Xen mappings
+ * when turning on/off the MMU.
+ */
+#define TEMPORARY_AREA_FIRST_SLOT    (first_table_offset(DOMHEAP_VIRT_START))
+
+/* Calculate the address in the temporary area */
+#define TEMPORARY_AREA_ADDR(addr)                           \
+     (((addr) & ~XEN_PT_LEVEL_MASK(1)) |                    \
+      (TEMPORARY_AREA_FIRST_SLOT << XEN_PT_LEVEL_SHIFT(1)))
+
+#define TEMPORARY_XEN_VIRT_START    TEMPORARY_AREA_ADDR(XEN_VIRT_START)
+
 #else /* ARM_64 */
 
 #define SLOT0_ENTRY_BITS  39
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 0fc6f2992dd1..9ebc2d0f609e 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -167,6 +167,9 @@  static void __init __maybe_unused build_assertions(void)
 #define CHECK_SAME_SLOT(level, virt1, virt2) \
     BUILD_BUG_ON(level##_table_offset(virt1) != level##_table_offset(virt2))
 
+#define CHECK_DIFFERENT_SLOT(level, virt1, virt2) \
+    BUILD_BUG_ON(level##_table_offset(virt1) == level##_table_offset(virt2))
+
 #ifdef CONFIG_ARM_64
     CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, FIXMAP_ADDR(0));
     CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, BOOT_FDT_VIRT_START);
@@ -174,7 +177,18 @@  static void __init __maybe_unused build_assertions(void)
     CHECK_SAME_SLOT(first, XEN_VIRT_START, FIXMAP_ADDR(0));
     CHECK_SAME_SLOT(first, XEN_VIRT_START, BOOT_FDT_VIRT_START);
 
+    /*
+     * For arm32, the temporary mapping will re-use the domheap
+     * first slot and the second slots will match.
+     */
+#ifdef CONFIG_ARM_32
+    CHECK_SAME_SLOT(first, TEMPORARY_XEN_VIRT_START, DOMHEAP_VIRT_START);
+    CHECK_DIFFERENT_SLOT(first, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
+    CHECK_SAME_SLOT(second, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
+#endif
+
 #undef CHECK_SAME_SLOT
+#undef CHECK_DIFFERENT_SLOT
 }
 
 static lpae_t *xen_map_table(mfn_t mfn)