
[v6,08/11] xen/arm64: move MMU related code from head.S to head_mmu.S

Message ID 20221104100741.2176307-9-wei.chen@arm.com (mailing list archive)
State New, archived
Series xen/arm: Add Armv8-R64 MPU support to Xen - Part#1

Commit Message

Wei Chen Nov. 4, 2022, 10:07 a.m. UTC
There is a lot of MMU-specific code in head.S. This code will not
be used on MPU systems. If we used #ifdef to gate it, the code
would become messy and hard to maintain. So we move the MMU-related
code to head_mmu.S, and keep the common code in head.S.

As the "fail" and "puts" functions need to be accessed from outside
this assembly file, we export them in this patch. The assembly
macros adr_l and load_paddr will later be used by both MMU and MPU
code, so we move them to macros.h.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
---
 xen/arch/arm/arm64/Makefile             |   3 +
 xen/arch/arm/arm64/head.S               | 407 +-----------------------
 xen/arch/arm/arm64/head_mmu.S           | 364 +++++++++++++++++++++
 xen/arch/arm/include/asm/arm64/macros.h |  52 ++-
 4 files changed, 432 insertions(+), 394 deletions(-)
 create mode 100644 xen/arch/arm/arm64/head_mmu.S

Comments

Julien Grall Nov. 6, 2022, 8:06 p.m. UTC | #1
Hi Wei,

On 04/11/2022 10:07, Wei Chen wrote:
> There are lots of MMU specific code in head.S. This code will not
> be used in MPU systems. If we use #ifdef to gate them, the code
> will become messy and hard to maintain. So we move MMU related
> code to head_mmu.S, and keep common code still in head.S.

I am afraid that you can't simply move the MMU code out of head.S 
because this will break Xen when running using the identity map.

This is because we only map the first 4KB of Xen with PA == VA. At the 
moment, we guarantee it by having everything that needs to be used in 
the identity mapping before _end_boot and checking at link time if this 
fits in 4KB.

Now that you have moved the MMU code outside of head.S, we need to find a 
different way to guarantee it. One way to do it would be to create a 
section that would be used for everything that needs to be identity mapped.
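
For reference, the link-time check mentioned above is a linker-script
assertion along these lines (a sketch from memory rather than the exact
xen.lds.S contents):

    ASSERT(_end_boot - start <= PAGE_SIZE, "Boot code is larger than 4K")

so anything that has to run on the identity map must currently sit between
start and _end_boot.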

> 
> As we need to access "fail" and "puts" functions out of assembly
> file, so we have to export them in this patch. And the assembly
> macros: adr_l and load_paddr will be used by MMU and MPU later,
> so we move them to macros.h.
> 
> Signed-off-by: Henry Wang <Henry.Wang@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>

In general, the first signed-off should match the author. So who is who 
here?

> ---
>   xen/arch/arm/arm64/Makefile             |   3 +
>   xen/arch/arm/arm64/head.S               | 407 +-----------------------
>   xen/arch/arm/arm64/head_mmu.S           | 364 +++++++++++++++++++++
>   xen/arch/arm/include/asm/arm64/macros.h |  52 ++-
>   4 files changed, 432 insertions(+), 394 deletions(-)
>   create mode 100644 xen/arch/arm/arm64/head_mmu.S
> 
> diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
> index 6d507da0d4..22da2f54b5 100644
> --- a/xen/arch/arm/arm64/Makefile
> +++ b/xen/arch/arm/arm64/Makefile
> @@ -8,6 +8,9 @@ obj-y += domctl.o
>   obj-y += domain.o
>   obj-y += entry.o
>   obj-y += head.o
> +ifneq ($(CONFIG_HAS_MPU),y)
> +obj-y += head_mmu.o
> +endif
>   obj-y += insn.o
>   obj-$(CONFIG_LIVEPATCH) += livepatch.o
>   obj-y += smc.o
> diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> index ccedf20dc7..d9a8da9120 100644
> --- a/xen/arch/arm/arm64/head.S
> +++ b/xen/arch/arm/arm64/head.S
> @@ -25,17 +25,6 @@
>   #include <efi/efierr.h>
>   #include <asm/arm64/efibind.h>
>   
> -#define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
> -#define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
> -#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
> -#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
> -#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
> -
> -/* Convenience defines to get slot used by Xen mapping. */
> -#define XEN_ZEROETH_SLOT    zeroeth_table_offset(XEN_VIRT_START)
> -#define XEN_FIRST_SLOT      first_table_offset(XEN_VIRT_START)
> -#define XEN_SECOND_SLOT     second_table_offset(XEN_VIRT_START)
> -
>   #define __HEAD_FLAG_PAGE_SIZE   ((PAGE_SHIFT - 10) / 2)
>   
>   #define __HEAD_FLAG_PHYS_BASE   1
> @@ -82,73 +71,22 @@
>    *  x30 - lr
>    */
>   
> -#ifdef CONFIG_EARLY_PRINTK
> -/*
> - * Macro to print a string to the UART, if there is one.
> - *
> - * Clobbers x0 - x3
> - */
> -#define PRINT(_s)          \
> -        mov   x3, lr ;     \
> -        adr   x0, 98f ;    \
> -        bl    puts    ;    \
> -        mov   lr, x3 ;     \
> -        RODATA_STR(98, _s)
> +.section .text.header, "ax", %progbits
> +/*.aarch64*/

The patch is already quite difficult to read. So I would rather prefer 
if the indentation is changed separately.

Furthermore, I think it would be best if the functions moved in the 
header are done separately to help checking (I would be able to diff the 
source with the destination more easily).

>   
>   /*
> - * Macro to print the value of register \xb
> + * Kernel startup entry point.
> + * ---------------------------

Same here about the indentation. I will not comment everywhere where the 
indentation was changed. So please look at it.

[...]

> -/*
> - * Map the UART in the fixmap (when earlyprintk is used) and hook the
> - * fixmap table in the page tables.
> - *
> - * The fixmap cannot be mapped in create_page_tables because it may
> - * clash with the 1:1 mapping.
> - *
> - * Inputs:
> - *   x20: Physical offset
> - *   x23: Early UART base physical address
> - *
> - * Clobbers x0 - x3
> - */
> -setup_fixmap:
> -#ifdef CONFIG_EARLY_PRINTK
> -        /* Add UART to the fixmap table */
> -        ldr   x0, =EARLY_UART_VIRTUAL_ADDRESS
> -        create_mapping_entry xen_fixmap, x0, x23, x1, x2, x3, type=PT_DEV_L3
> -#endif
> -        /* Map fixmap into boot_second */
> -        ldr   x0, =FIXMAP_ADDR(0)
> -        create_table_entry boot_second, xen_fixmap, x0, 2, x1, x2, x3
> -        /* Ensure any page table updates made above have occurred. */
> -        dsb   nshst
> -
> -        ret
> -ENDPROC(setup_fixmap)
> -
>   /*
>    * Setup the initial stack and jump to the C world
>    *
> @@ -810,41 +458,14 @@ launch:
>   ENDPROC(launch)
>   
>   /* Fail-stop */
> -fail:   PRINT("- Boot failed -\r\n")
> +ENTRY(fail)

This name is a bit too generic to be exposed. It would be better to 
duplicate it instead.
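
As an illustration of the duplication idea (a sketch only, the local label
name "mmu_fail" is made up here and not in the tree):

    mmu_fail:
            PRINT("- Boot failed -\r\n")
    1:      wfe
            b     1b
    ENDPROC(mmu_fail)

i.e. head_mmu.S would keep its own local fail-stop rather than branching to
an exported "fail".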

Cheers,
Julien Grall Nov. 7, 2022, 9:34 a.m. UTC | #2
On 06/11/2022 20:06, Julien Grall wrote:
> Hi Wei,
> 
> On 04/11/2022 10:07, Wei Chen wrote:
>> There are lots of MMU specific code in head.S. This code will not
>> be used in MPU systems. If we use #ifdef to gate them, the code
>> will become messy and hard to maintain. So we move MMU related
>> code to head_mmu.S, and keep common code still in head.S.
> 
> I am afraid that you can't simply move the MMU code out of head.S 
> because this will break Xen when running using the identity map.
> 
> This is because we only map the first 4KB of Xen with PA == VA. At the 
> moment, we guarantee it by having everything that needs to be used in 
> the identity mapping before _end_boot and checking at link time if this 
> fits in 4KB.
> 
> Now that you moved the MMU code outside of head.S. We need to find a 
> different way to guarantee it. One way to do it would be to create a 
> section that would be used for everything that needs to be identity mapped.

Looking at the code this morning, I noticed that we already have the 
section ".text.header". For now, that should do the job. So we just need
to check the size of .text.header.

Ideally, checking the size should be done in a separate pre-patch so it 
is easier to review.
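
Such a pre-patch could be little more than a linker-script assertion. A
minimal sketch (the __text_header_* symbols are hypothetical markers that
would be added around the input section in xen.lds.S):

    /* inside the output section that pulls in the header: */
        __text_header_start = .;
        *(.text.header)
        __text_header_end = .;

    /* and at the top level: */
    ASSERT(__text_header_end - __text_header_start <= PAGE_SIZE,
           ".text.header is larger than 4K")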

Cheers,
Wei Chen Nov. 9, 2022, 7:36 a.m. UTC | #3
Hi Julien,

> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: 2022年11月7日 4:06
> To: Wei Chen <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org
> Cc: nd <nd@arm.com>; Stefano Stabellini <sstabellini@kernel.org>; Bertrand
> Marquis <Bertrand.Marquis@arm.com>; Volodymyr Babchuk
> <Volodymyr_Babchuk@epam.com>; Henry Wang <Henry.Wang@arm.com>
> Subject: Re: [PATCH v6 08/11] xen/arm64: move MMU related code from head.S
> to head_mmu.S
> 
> Hi Wei,
> 
> On 04/11/2022 10:07, Wei Chen wrote:
> > There are lots of MMU specific code in head.S. This code will not
> > be used in MPU systems. If we use #ifdef to gate them, the code
> > will become messy and hard to maintain. So we move MMU related
> > code to head_mmu.S, and keep common code still in head.S.
> 
> I am afraid that you can't simply move the MMU code out of head.S
> because this will break Xen when running using the identity map.
> 
> This is because we only map the first 4KB of Xen with PA == VA. At the
> moment, we guarantee it by having everything that needs to be used in
> the identity mapping before _end_boot and checking at link time if this
> fits in 4KB.
> 
> Now that you moved the MMU code outside of head.S. We need to find a
> different way to guarantee it. One way to do it would be to create a
> section that would be used for everything that needs to be identity mapped.
> 

Quote from next email
"
Looking at the code this morning, I noticed that we already have the 
section ".text.header". For now, that should do the job. So we just need
to check the size of .text.header.

Ideally, checking the size should be done in a separate pre-patch so it 
is easier to review.
"

OK. We will create a patch to check the size, and place it at the
head of the series.

> >
> > As we need to access "fail" and "puts" functions out of assembly
> > file, so we have to export them in this patch. And the assembly
> > macros: adr_l and load_paddr will be used by MMU and MPU later,
> > so we move them to macros.h.
> >
> > Signed-off-by: Henry Wang <Henry.Wang@arm.com>
> > Signed-off-by: Wei Chen <wei.chen@arm.com>
> 
> In general, the first signed-off should match the author. So who is who
> here?
> 

I will adjust this order.

> > ---
> >   xen/arch/arm/arm64/Makefile             |   3 +
> >   xen/arch/arm/arm64/head.S               | 407 +-----------------------
> >   xen/arch/arm/arm64/head_mmu.S           | 364 +++++++++++++++++++++
> >   xen/arch/arm/include/asm/arm64/macros.h |  52 ++-
> >   4 files changed, 432 insertions(+), 394 deletions(-)
> >   create mode 100644 xen/arch/arm/arm64/head_mmu.S
> >
> > diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
> > index 6d507da0d4..22da2f54b5 100644
> > --- a/xen/arch/arm/arm64/Makefile
> > +++ b/xen/arch/arm/arm64/Makefile
> > @@ -8,6 +8,9 @@ obj-y += domctl.o
> >   obj-y += domain.o
> >   obj-y += entry.o
> >   obj-y += head.o
> > +ifneq ($(CONFIG_HAS_MPU),y)
> > +obj-y += head_mmu.o
> > +endif
> >   obj-y += insn.o
> >   obj-$(CONFIG_LIVEPATCH) += livepatch.o
> >   obj-y += smc.o
> > diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
> > index ccedf20dc7..d9a8da9120 100644
> > --- a/xen/arch/arm/arm64/head.S
> > +++ b/xen/arch/arm/arm64/head.S
> > @@ -25,17 +25,6 @@
> >   #include <efi/efierr.h>
> >   #include <asm/arm64/efibind.h>
> >
> > -#define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1
> */
> > -#define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1
> */
> > -#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1
> */
> > -#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1
> */
> > -#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1
> */
> > -
> > -/* Convenience defines to get slot used by Xen mapping. */
> > -#define XEN_ZEROETH_SLOT    zeroeth_table_offset(XEN_VIRT_START)
> > -#define XEN_FIRST_SLOT      first_table_offset(XEN_VIRT_START)
> > -#define XEN_SECOND_SLOT     second_table_offset(XEN_VIRT_START)
> > -
> >   #define __HEAD_FLAG_PAGE_SIZE   ((PAGE_SHIFT - 10) / 2)
> >
> >   #define __HEAD_FLAG_PHYS_BASE   1
> > @@ -82,73 +71,22 @@
> >    *  x30 - lr
> >    */
> >
> > -#ifdef CONFIG_EARLY_PRINTK
> > -/*
> > - * Macro to print a string to the UART, if there is one.
> > - *
> > - * Clobbers x0 - x3
> > - */
> > -#define PRINT(_s)          \
> > -        mov   x3, lr ;     \
> > -        adr   x0, 98f ;    \
> > -        bl    puts    ;    \
> > -        mov   lr, x3 ;     \
> > -        RODATA_STR(98, _s)
> > +.section .text.header, "ax", %progbits
> > +/*.aarch64*/
> 
> The patch is already quite difficult to read. So I would rather prefer
> if the indentation is changed separately.
> 

Ok.

> Furthermore, I think it would be best if the functions moved in the
> header are done separately to help checking (I would be able to diff the
> source with the destination more easily).
> 

Did you mean to create a separate patch for moving the functions into macros.h?
Or into the header section?

> >
> >   /*
> > - * Macro to print the value of register \xb
> > + * Kernel startup entry point.
> > + * ---------------------------
> 
> Same here about the indentation. I will not comment everywhere where the
> indentation was changed. So please look at it.
> 

OK. We will use a separate patch to clean them up.

> [...]
> 
> > -/*
> > - * Map the UART in the fixmap (when earlyprintk is used) and hook the
> > - * fixmap table in the page tables.
> > - *
> > - * The fixmap cannot be mapped in create_page_tables because it may
> > - * clash with the 1:1 mapping.
> > - *
> > - * Inputs:
> > - *   x20: Physical offset
> > - *   x23: Early UART base physical address
> > - *
> > - * Clobbers x0 - x3
> > - */
> > -setup_fixmap:
> > -#ifdef CONFIG_EARLY_PRINTK
> > -        /* Add UART to the fixmap table */
> > -        ldr   x0, =EARLY_UART_VIRTUAL_ADDRESS
> > -        create_mapping_entry xen_fixmap, x0, x23, x1, x2, x3,
> type=PT_DEV_L3
> > -#endif
> > -        /* Map fixmap into boot_second */
> > -        ldr   x0, =FIXMAP_ADDR(0)
> > -        create_table_entry boot_second, xen_fixmap, x0, 2, x1, x2, x3
> > -        /* Ensure any page table updates made above have occurred. */
> > -        dsb   nshst
> > -
> > -        ret
> > -ENDPROC(setup_fixmap)
> > -
> >   /*
> >    * Setup the initial stack and jump to the C world
> >    *
> > @@ -810,41 +458,14 @@ launch:
> >   ENDPROC(launch)
> >
> >   /* Fail-stop */
> > -fail:   PRINT("- Boot failed -\r\n")
> > +ENTRY(fail)
> 
> This name is a bit too generic to be exposed. But it would be better to
> duplicate it.

Yes, duplicating it might be better. We will do it in the next version.

Cheers,
Wei Chen

> 
> Cheers,
> 
> --
> Julien Grall
Julien Grall Nov. 9, 2022, 6:33 p.m. UTC | #4
On 09/11/2022 07:36, Wei Chen wrote:
> Hi Julien,

Hi Wei,

>> The patch is already quite difficult to read. So I would rather prefer
>> if the indentation is changed separately.
>>
> 
> Ok.
> 
>> Furthermore, I think it would be best if the functions moved in the
>> header are done separately to help checking (I would be able to diff the
>> source with the destination more easily).
>>
> 
> Did you mean to create a separate patch for moving the functions in macro.h?

I meant the macros in macros.h. Sorry, I misquoted it.

Cheers,
Julien Grall Nov. 13, 2022, 9:42 p.m. UTC | #5
On 09/11/2022 07:36, Wei Chen wrote:
> Hi Julien,

Hi Wei,

> 
>> -----Original Message-----
>> From: Julien Grall <julien@xen.org>
>> Sent: 2022年11月7日 4:06
>> To: Wei Chen <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org
>> Cc: nd <nd@arm.com>; Stefano Stabellini <sstabellini@kernel.org>; Bertrand
>> Marquis <Bertrand.Marquis@arm.com>; Volodymyr Babchuk
>> <Volodymyr_Babchuk@epam.com>; Henry Wang <Henry.Wang@arm.com>
>> Subject: Re: [PATCH v6 08/11] xen/arm64: move MMU related code from head.S
>> to head_mmu.S
>>
>> Hi Wei,
>>
>> On 04/11/2022 10:07, Wei Chen wrote:
>>> There are lots of MMU specific code in head.S. This code will not
>>> be used in MPU systems. If we use #ifdef to gate them, the code
>>> will become messy and hard to maintain. So we move MMU related
>>> code to head_mmu.S, and keep common code still in head.S.
>>
>> I am afraid that you can't simply move the MMU code out of head.S
>> because this will break Xen when running using the identity map.
>>
>> This is because we only map the first 4KB of Xen with PA == VA. At the
>> moment, we guarantee it by having everything that needs to be used in
>> the identity mapping before _end_boot and checking at link time if this
>> fits in 4KB.
>>
>> Now that you moved the MMU code outside of head.S. We need to find a
>> different way to guarantee it. One way to do it would be to create a
>> section that would be used for everything that needs to be identity mapped.
>>
> 
> Quote from next email
> "
> Looking at the code this morning, I noticed that we already have the
> section ".text.header". For now, that should do the job. So we just need
> to check the size of .text.header.
> 
> Ideally, checking the size should be done in a separate pre-patch so it
> is easier to review.
> "
> 
> OK. We will create a patch to check the size, and place it in the
> head of the series.

I thought a bit more about what I wrote. The Xen binary should always start 
with the Image/zImage header. At the moment, this is guaranteed because 
there is only one object using the section .text.header.

With the change introduced in this patch, there will be multiple objects 
contributing to the section .text.header. This means we are relying on the 
link order to always put the content of head.S first.

This is basically reverting to the behavior before commit 4267a33b19d 
("xen/build: put image header into a separate section").

Therefore we do need a separate section to be used for head_*.S and 
maybe part of head.S. This new section could be called text.idmap so we 
know what the section is used for.
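
One possible shape for this, purely as a sketch (names and exact placement
to be decided):

    /* head.S: only the Image header stays in .text.header ... */
            .section .text.header, "ax", %progbits

    /* ... while the boot code that must be identity mapped, in head.S and
     * head_mmu.S (and later head_mpu.S), moves to the new section: */
            .section .text.idmap, "ax", %progbits

with the linker script placing *(.text.header) first, *(.text.idmap)
immediately after it, and the existing size check covering the combined
range.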

Cheers,
Wei Chen Nov. 14, 2022, 5:36 a.m. UTC | #6
Hi Julien,

> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: 2022年11月14日 5:43
> To: Wei Chen <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org
> Cc: nd <nd@arm.com>; Stefano Stabellini <sstabellini@kernel.org>; Bertrand
> Marquis <Bertrand.Marquis@arm.com>; Volodymyr Babchuk
> <Volodymyr_Babchuk@epam.com>; Henry Wang <Henry.Wang@arm.com>
> Subject: Re: [PATCH v6 08/11] xen/arm64: move MMU related code from head.S
> to head_mmu.S
> 
> 
> 
> On 09/11/2022 07:36, Wei Chen wrote:
> > Hi Julien,
> 
> Hi Wei,
> 
> >
> >> -----Original Message-----
> >> From: Julien Grall <julien@xen.org>
> >> Sent: 2022年11月7日 4:06
> >> To: Wei Chen <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org
> >> Cc: nd <nd@arm.com>; Stefano Stabellini <sstabellini@kernel.org>;
> Bertrand
> >> Marquis <Bertrand.Marquis@arm.com>; Volodymyr Babchuk
> >> <Volodymyr_Babchuk@epam.com>; Henry Wang <Henry.Wang@arm.com>
> >> Subject: Re: [PATCH v6 08/11] xen/arm64: move MMU related code from
> head.S
> >> to head_mmu.S
> >>
> >> Hi Wei,
> >>
> >> On 04/11/2022 10:07, Wei Chen wrote:
> >>> There are lots of MMU specific code in head.S. This code will not
> >>> be used in MPU systems. If we use #ifdef to gate them, the code
> >>> will become messy and hard to maintain. So we move MMU related
> >>> code to head_mmu.S, and keep common code still in head.S.
> >>
> >> I am afraid that you can't simply move the MMU code out of head.S
> >> because this will break Xen when running using the identity map.
> >>
> >> This is because we only map the first 4KB of Xen with PA == VA. At the
> >> moment, we guarantee it by having everything that needs to be used in
> >> the identity mapping before _end_boot and checking at link time if this
> >> fits in 4KB.
> >>
> >> Now that you moved the MMU code outside of head.S. We need to find a
> >> different way to guarantee it. One way to do it would be to create a
> >> section that would be used for everything that needs to be identity
> mapped.
> >>
> >
> > Quote from next email
> > "
> > Looking at the code this morning, I noticed that we already have the
> > section ".text.header". For now, that should do the job. So we just need
> > to check the size of .text.header.
> >
> > Ideally, checking the size should be done in a separate pre-patch so it
> > is easier to review.
> > "
> >
> > OK. We will create a patch to check the size, and place it in the
> > head of the series.
> 
> I thought a bit more about what I wrote. Xen binary should always start
> with the Image/Zimage header. At the moment, this is guaranteed because
> there is only one object using the section .text.header.
> 
> With the change introduced in this patch, there will be multiple objects
> using with the sections .text.header. This means we are relying on the
> compiler to always put the content of head.S first.
> 

Yes, it now depends on the objects' order in the Makefile. That is a risk.

> This is basically reverting to the behavior before commit 4267a33b19d
> ("xen/build: put image header into a separate section").
> 
> Therefore we do need a separate section to be used for head_*.S and
> maybe part of head.S. This new section could be called text.idmap so we
> know what the section is used for.
> 

Yes, we might place this new section right after .text.header.

Cheers,
Wei Chen

> Cheers,
> 
> --
> Julien Grall

Patch

diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index 6d507da0d4..22da2f54b5 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -8,6 +8,9 @@  obj-y += domctl.o
 obj-y += domain.o
 obj-y += entry.o
 obj-y += head.o
+ifneq ($(CONFIG_HAS_MPU),y)
+obj-y += head_mmu.o
+endif
 obj-y += insn.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
 obj-y += smc.o
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index ccedf20dc7..d9a8da9120 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -25,17 +25,6 @@ 
 #include <efi/efierr.h>
 #include <asm/arm64/efibind.h>
 
-#define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
-#define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
-#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
-#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
-#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
-
-/* Convenience defines to get slot used by Xen mapping. */
-#define XEN_ZEROETH_SLOT    zeroeth_table_offset(XEN_VIRT_START)
-#define XEN_FIRST_SLOT      first_table_offset(XEN_VIRT_START)
-#define XEN_SECOND_SLOT     second_table_offset(XEN_VIRT_START)
-
 #define __HEAD_FLAG_PAGE_SIZE   ((PAGE_SHIFT - 10) / 2)
 
 #define __HEAD_FLAG_PHYS_BASE   1
@@ -82,73 +71,22 @@ 
  *  x30 - lr
  */
 
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Macro to print a string to the UART, if there is one.
- *
- * Clobbers x0 - x3
- */
-#define PRINT(_s)          \
-        mov   x3, lr ;     \
-        adr   x0, 98f ;    \
-        bl    puts    ;    \
-        mov   lr, x3 ;     \
-        RODATA_STR(98, _s)
+.section .text.header, "ax", %progbits
+/*.aarch64*/
 
 /*
- * Macro to print the value of register \xb
+ * Kernel startup entry point.
+ * ---------------------------
  *
- * Clobbers x0 - x4
- */
-.macro print_reg xb
-        mov   x0, \xb
-        mov   x4, lr
-        bl    putn
-        mov   lr, x4
-.endm
-
-#else /* CONFIG_EARLY_PRINTK */
-#define PRINT(s)
-
-.macro print_reg xb
-.endm
-
-#endif /* !CONFIG_EARLY_PRINTK */
-
-/*
- * Pseudo-op for PC relative adr <reg>, <symbol> where <symbol> is
- * within the range +/- 4GB of the PC.
+ * The requirements are:
+ *   MMU = off, D-cache = off, I-cache = on or off,
+ *   x0 = physical address to the FDT blob.
  *
- * @dst: destination register (64 bit wide)
- * @sym: name of the symbol
+ * This must be the very first address in the loaded image.
+ * It should be linked at XEN_VIRT_START, and loaded at any
+ * 4K-aligned address.  All of text+data+bss must fit in 2MB,
+ * or the initial pagetable code below will need adjustment.
  */
-.macro  adr_l, dst, sym
-        adrp \dst, \sym
-        add  \dst, \dst, :lo12:\sym
-.endm
-
-/* Load the physical address of a symbol into xb */
-.macro load_paddr xb, sym
-        ldr \xb, =\sym
-        add \xb, \xb, x20
-.endm
-
-        .section .text.header, "ax", %progbits
-        /*.aarch64*/
-
-        /*
-         * Kernel startup entry point.
-         * ---------------------------
-         *
-         * The requirements are:
-         *   MMU = off, D-cache = off, I-cache = on or off,
-         *   x0 = physical address to the FDT blob.
-         *
-         * This must be the very first address in the loaded image.
-         * It should be linked at XEN_VIRT_START, and loaded at any
-         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
-         * or the initial pagetable code below will need adjustment.
-         */
 
 GLOBAL(start)
         /*
@@ -497,296 +435,6 @@  cpu_init:
         ret
 ENDPROC(cpu_init)
 
-/*
- * Macro to find the slot number at a given page-table level
- *
- * slot:     slot computed
- * virt:     virtual address
- * lvl:      page-table level
- */
-.macro get_table_slot, slot, virt, lvl
-        ubfx  \slot, \virt, #XEN_PT_LEVEL_SHIFT(\lvl), #XEN_PT_LPAE_SHIFT
-.endm
-
-/*
- * Macro to create a page table entry in \ptbl to \tbl
- *
- * ptbl:    table symbol where the entry will be created
- * tbl:     table symbol to point to
- * virt:    virtual address
- * lvl:     page-table level
- * tmp1:    scratch register
- * tmp2:    scratch register
- * tmp3:    scratch register
- *
- * Preserves \virt
- * Clobbers \tmp1, \tmp2, \tmp3
- *
- * Also use x20 for the phys offset.
- *
- * Note that all parameters using registers should be distinct.
- */
-.macro create_table_entry, ptbl, tbl, virt, lvl, tmp1, tmp2, tmp3
-        get_table_slot \tmp1, \virt, \lvl   /* \tmp1 := slot in \tlb */
-
-        load_paddr \tmp2, \tbl
-        mov   \tmp3, #PT_PT                 /* \tmp3 := right for linear PT */
-        orr   \tmp3, \tmp3, \tmp2           /*          + \tlb paddr */
-
-        adr_l \tmp2, \ptbl
-
-        str   \tmp3, [\tmp2, \tmp1, lsl #3]
-.endm
-
-/*
- * Macro to create a mapping entry in \tbl to \phys. Only mapping in 3rd
- * level table (i.e page granularity) is supported.
- *
- * ptbl:     table symbol where the entry will be created
- * virt:    virtual address
- * phys:    physical address (should be page aligned)
- * tmp1:    scratch register
- * tmp2:    scratch register
- * tmp3:    scratch register
- * type:    mapping type. If not specified it will be normal memory (PT_MEM_L3)
- *
- * Preserves \virt, \phys
- * Clobbers \tmp1, \tmp2, \tmp3
- *
- * Note that all parameters using registers should be distinct.
- */
-.macro create_mapping_entry, ptbl, virt, phys, tmp1, tmp2, tmp3, type=PT_MEM_L3
-        and   \tmp3, \phys, #THIRD_MASK     /* \tmp3 := PAGE_ALIGNED(phys) */
-
-        get_table_slot \tmp1, \virt, 3      /* \tmp1 := slot in \tlb */
-
-        mov   \tmp2, #\type                 /* \tmp2 := right for section PT */
-        orr   \tmp2, \tmp2, \tmp3           /*          + PAGE_ALIGNED(phys) */
-
-        adr_l \tmp3, \ptbl
-
-        str   \tmp2, [\tmp3, \tmp1, lsl #3]
-.endm
-
-/*
- * Rebuild the boot pagetable's first-level entries. The structure
- * is described in mm.c.
- *
- * After the CPU enables paging it will add the fixmap mapping
- * to these page tables, however this may clash with the 1:1
- * mapping. So each CPU must rebuild the page tables here with
- * the 1:1 in place.
- *
- * Inputs:
- *   x19: paddr(start)
- *   x20: phys offset
- *
- * Clobbers x0 - x4
- */
-create_page_tables:
-        /* Prepare the page-tables for mapping Xen */
-        ldr   x0, =XEN_VIRT_START
-        create_table_entry boot_pgtable, boot_first, x0, 0, x1, x2, x3
-        create_table_entry boot_first, boot_second, x0, 1, x1, x2, x3
-        create_table_entry boot_second, boot_third, x0, 2, x1, x2, x3
-
-        /* Map Xen */
-        adr_l x4, boot_third
-
-        lsr   x2, x19, #THIRD_SHIFT  /* Base address for 4K mapping */
-        lsl   x2, x2, #THIRD_SHIFT
-        mov   x3, #PT_MEM_L3         /* x2 := Section map */
-        orr   x2, x2, x3
-
-        /* ... map of vaddr(start) in boot_third */
-        mov   x1, xzr
-1:      str   x2, [x4, x1]           /* Map vaddr(start) */
-        add   x2, x2, #PAGE_SIZE     /* Next page */
-        add   x1, x1, #8             /* Next slot */
-        cmp   x1, #(XEN_PT_LPAE_ENTRIES<<3) /* 512 entries per page */
-        b.lt  1b
-
-        /*
-         * If Xen is loaded at exactly XEN_VIRT_START then we don't
-         * need an additional 1:1 mapping, the virtual mapping will
-         * suffice.
-         */
-        cmp   x19, #XEN_VIRT_START
-        bne   1f
-        ret
-1:
-        /*
-         * Setup the 1:1 mapping so we can turn the MMU on. Note that
-         * only the first page of Xen will be part of the 1:1 mapping.
-         */
-
-        /*
-         * Find the zeroeth slot used. If the slot is not
-         * XEN_ZEROETH_SLOT, then the 1:1 mapping will use its own set of
-         * page-tables from the first level.
-         */
-        get_table_slot x0, x19, 0       /* x0 := zeroeth slot */
-        cmp   x0, #XEN_ZEROETH_SLOT
-        beq   1f
-        create_table_entry boot_pgtable, boot_first_id, x19, 0, x0, x1, x2
-        b     link_from_first_id
-
-1:
-        /*
-         * Find the first slot used. If the slot is not XEN_FIRST_SLOT,
-         * then the 1:1 mapping will use its own set of page-tables from
-         * the second level.
-         */
-        get_table_slot x0, x19, 1      /* x0 := first slot */
-        cmp   x0, #XEN_FIRST_SLOT
-        beq   1f
-        create_table_entry boot_first, boot_second_id, x19, 1, x0, x1, x2
-        b     link_from_second_id
-
-1:
-        /*
-         * Find the second slot used. If the slot is XEN_SECOND_SLOT, then the
-         * 1:1 mapping will use its own set of page-tables from the
-         * third level. For slot XEN_SECOND_SLOT, Xen is not yet able to handle
-         * it.
-         */
-        get_table_slot x0, x19, 2     /* x0 := second slot */
-        cmp   x0, #XEN_SECOND_SLOT
-        beq   virtphys_clash
-        create_table_entry boot_second, boot_third_id, x19, 2, x0, x1, x2
-        b     link_from_third_id
-
-link_from_first_id:
-        create_table_entry boot_first_id, boot_second_id, x19, 1, x0, x1, x2
-link_from_second_id:
-        create_table_entry boot_second_id, boot_third_id, x19, 2, x0, x1, x2
-link_from_third_id:
-        create_mapping_entry boot_third_id, x19, x19, x0, x1, x2
-        ret
-
-virtphys_clash:
-        /* Identity map clashes with boot_third, which we cannot handle yet */
-        PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
-        b     fail
-ENDPROC(create_page_tables)
-
-/*
- * Turn on the Data Cache and the MMU. The function will return on the 1:1
- * mapping. In other word, the caller is responsible to switch to the runtime
- * mapping.
- *
- * Clobbers x0 - x3
- */
-enable_mmu:
-        PRINT("- Turning on paging -\r\n")
-
-        /*
-         * The state of the TLBs is unknown before turning on the MMU.
-         * Flush them to avoid stale one.
-         */
-        tlbi  alle2                  /* Flush hypervisor TLBs */
-        dsb   nsh
-
-        /* Write Xen's PT's paddr into TTBR0_EL2 */
-        load_paddr x0, boot_pgtable
-        msr   TTBR0_EL2, x0
-        isb
-
-        mrs   x0, SCTLR_EL2
-        orr   x0, x0, #SCTLR_Axx_ELx_M  /* Enable MMU */
-        orr   x0, x0, #SCTLR_Axx_ELx_C  /* Enable D-cache */
-        dsb   sy                     /* Flush PTE writes and finish reads */
-        msr   SCTLR_EL2, x0          /* now paging is enabled */
-        isb                          /* Now, flush the icache */
-        ret
-ENDPROC(enable_mmu)
-
-/*
- * Remove the 1:1 map from the page-tables. It is not easy to keep track
- * where the 1:1 map was mapped, so we will look for the top-level entry
- * exclusive to the 1:1 map and remove it.
- *
- * Inputs:
- *   x19: paddr(start)
- *
- * Clobbers x0 - x1
- */
-remove_identity_mapping:
-        /*
-         * Find the zeroeth slot used. Remove the entry from zeroeth
-         * table if the slot is not XEN_ZEROETH_SLOT.
-         */
-        get_table_slot x1, x19, 0       /* x1 := zeroeth slot */
-        cmp   x1, #XEN_ZEROETH_SLOT
-        beq   1f
-        /* It is not in slot XEN_ZEROETH_SLOT, remove the entry. */
-        ldr   x0, =boot_pgtable         /* x0 := root table */
-        str   xzr, [x0, x1, lsl #3]
-        b     identity_mapping_removed
-
-1:
-        /*
-         * Find the first slot used. Remove the entry for the first
-         * table if the slot is not XEN_FIRST_SLOT.
-         */
-        get_table_slot x1, x19, 1       /* x1 := first slot */
-        cmp   x1, #XEN_FIRST_SLOT
-        beq   1f
-        /* It is not in slot XEN_FIRST_SLOT, remove the entry. */
-        ldr   x0, =boot_first           /* x0 := first table */
-        str   xzr, [x0, x1, lsl #3]
-        b     identity_mapping_removed
-
-1:
-        /*
-         * Find the second slot used. Remove the entry for the first
-         * table if the slot is not XEN_SECOND_SLOT.
-         */
-        get_table_slot x1, x19, 2       /* x1 := second slot */
-        cmp   x1, #XEN_SECOND_SLOT
-        beq   identity_mapping_removed
-        /* It is not in slot 1, remove the entry */
-        ldr   x0, =boot_second          /* x0 := second table */
-        str   xzr, [x0, x1, lsl #3]
-
-identity_mapping_removed:
-        /* See asm/arm64/flushtlb.h for the explanation of the sequence. */
-        dsb   nshst
-        tlbi  alle2
-        dsb   nsh
-        isb
-
-        ret
-ENDPROC(remove_identity_mapping)
-
-/*
- * Map the UART in the fixmap (when earlyprintk is used) and hook the
- * fixmap table in the page tables.
- *
- * The fixmap cannot be mapped in create_page_tables because it may
- * clash with the 1:1 mapping.
- *
- * Inputs:
- *   x20: Physical offset
- *   x23: Early UART base physical address
- *
- * Clobbers x0 - x3
- */
-setup_fixmap:
-#ifdef CONFIG_EARLY_PRINTK
-        /* Add UART to the fixmap table */
-        ldr   x0, =EARLY_UART_VIRTUAL_ADDRESS
-        create_mapping_entry xen_fixmap, x0, x23, x1, x2, x3, type=PT_DEV_L3
-#endif
-        /* Map fixmap into boot_second */
-        ldr   x0, =FIXMAP_ADDR(0)
-        create_table_entry boot_second, xen_fixmap, x0, 2, x1, x2, x3
-        /* Ensure any page table updates made above have occurred. */
-        dsb   nshst
-
-        ret
-ENDPROC(setup_fixmap)
-
 /*
  * Setup the initial stack and jump to the C world
  *
@@ -810,41 +458,14 @@  launch:
 ENDPROC(launch)
 
 /* Fail-stop */
-fail:   PRINT("- Boot failed -\r\n")
+ENTRY(fail)
+        PRINT("- Boot failed -\r\n")
 1:      wfe
         b     1b
 ENDPROC(fail)
 
 GLOBAL(_end_boot)
 
-/*
- * Switch TTBR
- *
- * x0    ttbr
- *
- * TODO: This code does not comply with break-before-make.
- */
-ENTRY(switch_ttbr)
-        dsb   sy                     /* Ensure the flushes happen before
-                                      * continuing */
-        isb                          /* Ensure synchronization with previous
-                                      * changes to text */
-        tlbi   alle2                 /* Flush hypervisor TLB */
-        ic     iallu                 /* Flush I-cache */
-        dsb    sy                    /* Ensure completion of TLB flush */
-        isb
-
-        msr    TTBR0_EL2, x0
-
-        isb                          /* Ensure synchronization with previous
-                                      * changes to text */
-        tlbi   alle2                 /* Flush hypervisor TLB */
-        ic     iallu                 /* Flush I-cache */
-        dsb    sy                    /* Ensure completion of TLB flush */
-        isb
-
-        ret
-ENDPROC(switch_ttbr)
 
 #ifdef CONFIG_EARLY_PRINTK
 /*
@@ -868,7 +489,7 @@  ENDPROC(init_uart)
  * x0: Nul-terminated string to print.
  * x23: Early UART base address
  * Clobbers x0-x1 */
-puts:
+ENTRY(puts)
         early_uart_ready x23, 1
         ldrb  w1, [x0], #1           /* Load next char */
         cbz   w1, 1f                 /* Exit on nul */
diff --git a/xen/arch/arm/arm64/head_mmu.S b/xen/arch/arm/arm64/head_mmu.S
new file mode 100644
index 0000000000..1a3df81a38
--- /dev/null
+++ b/xen/arch/arm/arm64/head_mmu.S
@@ -0,0 +1,364 @@ 
+/*
+ * xen/arch/arm/head_mmu.S
+ *
+ * Start-of-day code for an ARMv8-A.
+ *
+ * Ian Campbell <ian.campbell@citrix.com>
+ * Copyright (c) 2012 Citrix Systems.
+ *
+ * Based on ARMv7-A head.S by
+ * Tim Deegan <tim@xen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/page.h>
+#include <asm/early_printk.h>
+
+#define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
+#define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
+#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
+#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
+#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */
+
+/* Convenience defines to get slot used by Xen mapping. */
+#define XEN_ZEROETH_SLOT    zeroeth_table_offset(XEN_VIRT_START)
+#define XEN_FIRST_SLOT      first_table_offset(XEN_VIRT_START)
+#define XEN_SECOND_SLOT     second_table_offset(XEN_VIRT_START)
+
+/*
+ * Macro to find the slot number at a given page-table level
+ *
+ * slot:     slot computed
+ * virt:     virtual address
+ * lvl:      page-table level
+ */
+.macro get_table_slot, slot, virt, lvl
+        ubfx  \slot, \virt, #XEN_PT_LEVEL_SHIFT(\lvl), #XEN_PT_LPAE_SHIFT
+.endm
+
+/*
+ * Macro to create a page table entry in \ptbl to \tbl
+ *
+ * ptbl:    table symbol where the entry will be created
+ * tbl:     table symbol to point to
+ * virt:    virtual address
+ * lvl:     page-table level
+ * tmp1:    scratch register
+ * tmp2:    scratch register
+ * tmp3:    scratch register
+ *
+ * Preserves \virt
+ * Clobbers \tmp1, \tmp2, \tmp3
+ *
+ * Also use x20 for the phys offset.
+ *
+ * Note that all parameters using registers should be distinct.
+ */
+.macro create_table_entry, ptbl, tbl, virt, lvl, tmp1, tmp2, tmp3
+        get_table_slot \tmp1, \virt, \lvl   /* \tmp1 := slot in \tlb */
+
+        load_paddr \tmp2, \tbl
+        mov   \tmp3, #PT_PT                 /* \tmp3 := right for linear PT */
+        orr   \tmp3, \tmp3, \tmp2           /*          + \tlb paddr */
+
+        adr_l \tmp2, \ptbl
+
+        str   \tmp3, [\tmp2, \tmp1, lsl #3]
+.endm
+
+/*
+ * Macro to create a mapping entry in \tbl to \phys. Only mapping in 3rd
+ * level table (i.e page granularity) is supported.
+ *
+ * ptbl:     table symbol where the entry will be created
+ * virt:    virtual address
+ * phys:    physical address (should be page aligned)
+ * tmp1:    scratch register
+ * tmp2:    scratch register
+ * tmp3:    scratch register
+ * type:    mapping type. If not specified it will be normal memory (PT_MEM_L3)
+ *
+ * Preserves \virt, \phys
+ * Clobbers \tmp1, \tmp2, \tmp3
+ *
+ * Note that all parameters using registers should be distinct.
+ */
+.macro create_mapping_entry, ptbl, virt, phys, tmp1, tmp2, tmp3, type=PT_MEM_L3
+        and   \tmp3, \phys, #THIRD_MASK     /* \tmp3 := PAGE_ALIGNED(phys) */
+
+        get_table_slot \tmp1, \virt, 3      /* \tmp1 := slot in \tlb */
+
+        mov   \tmp2, #\type                 /* \tmp2 := right for section PT */
+        orr   \tmp2, \tmp2, \tmp3           /*          + PAGE_ALIGNED(phys) */
+
+        adr_l \tmp3, \ptbl
+
+        str   \tmp2, [\tmp3, \tmp1, lsl #3]
+.endm
+
+.section .text.header, "ax", %progbits
+/*.aarch64*/
+
+/*
+ * Rebuild the boot pagetable's first-level entries. The structure
+ * is described in mm.c.
+ *
+ * After the CPU enables paging it will add the fixmap mapping
+ * to these page tables, however this may clash with the 1:1
+ * mapping. So each CPU must rebuild the page tables here with
+ * the 1:1 in place.
+ *
+ * Inputs:
+ *   x19: paddr(start)
+ *   x20: phys offset
+ *
+ * Clobbers x0 - x4
+ */
+ENTRY(create_page_tables)
+        /* Prepare the page-tables for mapping Xen */
+        ldr   x0, =XEN_VIRT_START
+        create_table_entry boot_pgtable, boot_first, x0, 0, x1, x2, x3
+        create_table_entry boot_first, boot_second, x0, 1, x1, x2, x3
+        create_table_entry boot_second, boot_third, x0, 2, x1, x2, x3
+
+        /* Map Xen */
+        adr_l x4, boot_third
+
+        lsr   x2, x19, #THIRD_SHIFT  /* Base address for 4K mapping */
+        lsl   x2, x2, #THIRD_SHIFT
+        mov   x3, #PT_MEM_L3         /* x2 := Section map */
+        orr   x2, x2, x3
+
+        /* ... map of vaddr(start) in boot_third */
+        mov   x1, xzr
+1:      str   x2, [x4, x1]           /* Map vaddr(start) */
+        add   x2, x2, #PAGE_SIZE     /* Next page */
+        add   x1, x1, #8             /* Next slot */
+        cmp   x1, #(XEN_PT_LPAE_ENTRIES<<3) /* 512 entries per page */
+        b.lt  1b
+
+        /*
+         * If Xen is loaded at exactly XEN_VIRT_START then we don't
+         * need an additional 1:1 mapping, the virtual mapping will
+         * suffice.
+         */
+        cmp   x19, #XEN_VIRT_START
+        bne   1f
+        ret
+1:
+        /*
+         * Setup the 1:1 mapping so we can turn the MMU on. Note that
+         * only the first page of Xen will be part of the 1:1 mapping.
+         */
+
+        /*
+         * Find the zeroeth slot used. If the slot is not
+         * XEN_ZEROETH_SLOT, then the 1:1 mapping will use its own set of
+         * page-tables from the first level.
+         */
+        get_table_slot x0, x19, 0       /* x0 := zeroeth slot */
+        cmp   x0, #XEN_ZEROETH_SLOT
+        beq   1f
+        create_table_entry boot_pgtable, boot_first_id, x19, 0, x0, x1, x2
+        b     link_from_first_id
+
+1:
+        /*
+         * Find the first slot used. If the slot is not XEN_FIRST_SLOT,
+         * then the 1:1 mapping will use its own set of page-tables from
+         * the second level.
+         */
+        get_table_slot x0, x19, 1      /* x0 := first slot */
+        cmp   x0, #XEN_FIRST_SLOT
+        beq   1f
+        create_table_entry boot_first, boot_second_id, x19, 1, x0, x1, x2
+        b     link_from_second_id
+
+1:
+        /*
+         * Find the second slot used. If the slot is XEN_SECOND_SLOT, then the
+         * 1:1 mapping will use its own set of page-tables from the
+         * third level. For slot XEN_SECOND_SLOT, Xen is not yet able to handle
+         * it.
+         */
+        get_table_slot x0, x19, 2     /* x0 := second slot */
+        cmp   x0, #XEN_SECOND_SLOT
+        beq   virtphys_clash
+        create_table_entry boot_second, boot_third_id, x19, 2, x0, x1, x2
+        b     link_from_third_id
+
+link_from_first_id:
+        create_table_entry boot_first_id, boot_second_id, x19, 1, x0, x1, x2
+link_from_second_id:
+        create_table_entry boot_second_id, boot_third_id, x19, 2, x0, x1, x2
+link_from_third_id:
+        create_mapping_entry boot_third_id, x19, x19, x0, x1, x2
+        ret
+
+virtphys_clash:
+        /* Identity map clashes with boot_third, which we cannot handle yet */
+        PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
+        b     fail
+ENDPROC(create_page_tables)
+
+/*
+ * Turn on the Data Cache and the MMU. The function will return on the 1:1
+ * mapping. In other word, the caller is responsible to switch to the runtime
+ * mapping.
+ *
+ * Clobbers x0 - x3
+ */
+ENTRY(enable_mmu)
+        PRINT("- Turning on paging -\r\n")
+
+        /*
+         * The state of the TLBs is unknown before turning on the MMU.
+         * Flush them to avoid stale one.
+         */
+        tlbi  alle2                  /* Flush hypervisor TLBs */
+        dsb   nsh
+
+        /* Write Xen's PT's paddr into TTBR0_EL2 */
+        load_paddr x0, boot_pgtable
+        msr   TTBR0_EL2, x0
+        isb
+
+        mrs   x0, SCTLR_EL2
+        orr   x0, x0, #SCTLR_Axx_ELx_M  /* Enable MMU */
+        orr   x0, x0, #SCTLR_Axx_ELx_C  /* Enable D-cache */
+        dsb   sy                     /* Flush PTE writes and finish reads */
+        msr   SCTLR_EL2, x0          /* now paging is enabled */
+        isb                          /* Now, flush the icache */
+        ret
+ENDPROC(enable_mmu)
+
+/*
+ * Remove the 1:1 map from the page-tables. It is not easy to keep track
+ * where the 1:1 map was mapped, so we will look for the top-level entry
+ * exclusive to the 1:1 map and remove it.
+ *
+ * Inputs:
+ *   x19: paddr(start)
+ *
+ * Clobbers x0 - x1
+ */
+ENTRY(remove_identity_mapping)
+        /*
+         * Find the zeroeth slot used. Remove the entry from zeroeth
+         * table if the slot is not XEN_ZEROETH_SLOT.
+         */
+        get_table_slot x1, x19, 0       /* x1 := zeroeth slot */
+        cmp   x1, #XEN_ZEROETH_SLOT
+        beq   1f
+        /* It is not in slot XEN_ZEROETH_SLOT, remove the entry. */
+        ldr   x0, =boot_pgtable         /* x0 := root table */
+        str   xzr, [x0, x1, lsl #3]
+        b     identity_mapping_removed
+
+1:
+        /*
+         * Find the first slot used. Remove the entry for the first
+         * table if the slot is not XEN_FIRST_SLOT.
+         */
+        get_table_slot x1, x19, 1       /* x1 := first slot */
+        cmp   x1, #XEN_FIRST_SLOT
+        beq   1f
+        /* It is not in slot XEN_FIRST_SLOT, remove the entry. */
+        ldr   x0, =boot_first           /* x0 := first table */
+        str   xzr, [x0, x1, lsl #3]
+        b     identity_mapping_removed
+
+1:
+        /*
+         * Find the second slot used. Remove the entry for the first
+         * table if the slot is not XEN_SECOND_SLOT.
+         */
+        get_table_slot x1, x19, 2       /* x1 := second slot */
+        cmp   x1, #XEN_SECOND_SLOT
+        beq   identity_mapping_removed
+        /* It is not in slot 1, remove the entry */
+        ldr   x0, =boot_second          /* x0 := second table */
+        str   xzr, [x0, x1, lsl #3]
+
+identity_mapping_removed:
+        /* See asm/arm64/flushtlb.h for the explanation of the sequence. */
+        dsb   nshst
+        tlbi  alle2
+        dsb   nsh
+        isb
+
+        ret
+ENDPROC(remove_identity_mapping)
+
+/*
+ * Map the UART in the fixmap (when earlyprintk is used) and hook the
+ * fixmap table in the page tables.
+ *
+ * The fixmap cannot be mapped in create_page_tables because it may
+ * clash with the 1:1 mapping.
+ *
+ * Inputs:
+ *   x20: Physical offset
+ *   x23: Early UART base physical address
+ *
+ * Clobbers x0 - x3
+ */
+ENTRY(setup_fixmap)
+#ifdef CONFIG_EARLY_PRINTK
+        /* Add UART to the fixmap table */
+        ldr   x0, =EARLY_UART_VIRTUAL_ADDRESS
+        create_mapping_entry xen_fixmap, x0, x23, x1, x2, x3, type=PT_DEV_L3
+#endif
+        /* Map fixmap into boot_second */
+        ldr   x0, =FIXMAP_ADDR(0)
+        create_table_entry boot_second, xen_fixmap, x0, 2, x1, x2, x3
+        /* Ensure any page table updates made above have occurred. */
+        dsb   nshst
+
+        ret
+ENDPROC(setup_fixmap)
+
+/*
+ * Switch TTBR
+ *
+ * x0    ttbr
+ *
+ * TODO: This code does not comply with break-before-make.
+ */
+ENTRY(switch_ttbr)
+        dsb   sy                     /* Ensure the flushes happen before
+                                      * continuing */
+        isb                          /* Ensure synchronization with previous
+                                      * changes to text */
+        tlbi   alle2                 /* Flush hypervisor TLB */
+        ic     iallu                 /* Flush I-cache */
+        dsb    sy                    /* Ensure completion of TLB flush */
+        isb
+
+        msr    TTBR0_EL2, x0
+
+        isb                          /* Ensure synchronization with previous
+                                      * changes to text */
+        tlbi   alle2                 /* Flush hypervisor TLB */
+        ic     iallu                 /* Flush I-cache */
+        dsb    sy                    /* Ensure completion of TLB flush */
+        isb
+
+        ret
+ENDPROC(switch_ttbr)
+
+/*
+ * Local variables:
+ * mode: ASM
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/arm64/macros.h b/xen/arch/arm/include/asm/arm64/macros.h
index 140e223b4c..462dc70335 100644
--- a/xen/arch/arm/include/asm/arm64/macros.h
+++ b/xen/arch/arm/include/asm/arm64/macros.h
@@ -32,10 +32,60 @@ 
         hint    #22
     .endm
 
+    /*
+     * Pseudo-op for PC relative adr <reg>, <symbol> where <symbol> is
+     * within the range +/- 4GB of the PC.
+     *
+     * @dst: destination register (64 bit wide)
+     * @sym: name of the symbol
+     */
+    .macro  adr_l, dst, sym
+        adrp \dst, \sym
+        add  \dst, \dst, :lo12:\sym
+    .endm
+
+    /* Load the physical address of a symbol into xb */
+    .macro load_paddr xb, sym
+        ldr \xb, =\sym
+        add \xb, \xb, x20
+    .endm
+
 /*
  * Register aliases.
  */
 lr      .req    x30             /* link register */
 
-#endif /* __ASM_ARM_ARM64_MACROS_H */
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Macro to print a string to the UART, if there is one.
+ *
+ * Clobbers x0 - x3
+ */
+#define PRINT(_s)          \
+        mov   x3, lr ;     \
+        adr   x0, 98f ;    \
+        bl    puts    ;    \
+        mov   lr, x3 ;     \
+        RODATA_STR(98, _s)
 
+    /*
+     * Macro to print the value of register \xb
+     *
+     * Clobbers x0 - x4
+     */
+    .macro print_reg xb
+    mov   x0, \xb
+    mov   x4, lr
+    bl    putn
+    mov   lr, x4
+    .endm
+
+#else /* CONFIG_EARLY_PRINTK */
+#define PRINT(s)
+
+.macro print_reg xb
+.endm
+
+#endif /* !CONFIG_EARLY_PRINTK */
+
+#endif /* __ASM_ARM_ARM64_MACROS_H */