
[v6,09/13] xen/arm: Extract MMU-specific MM code

Message ID 20230828013224.669433-10-Henry.Wang@arm.com (mailing list archive)
State Superseded
Series xen/arm: Split MMU code as the preparation of MPU work

Commit Message

Henry Wang Aug. 28, 2023, 1:32 a.m. UTC
Currently, most of the code in arm/mm.{c,h} and arm/arm64/mm.c
is MMU-specific. To make the MM code extendable, this commit extracts
the MMU-specific MM code.

Extract the boot CPU MM bringup code from arm/mm.c to mmu/setup.c.
Move arm/arm64/mm.c to arm/arm64/mmu/mm.c. Since the function
setup_directmap_mappings() has different implementations on arm32
and arm64, move each arch-specific implementation to the
corresponding arm{32,64}/mmu/mm.c instead of using #ifdef again.

For header files, move the MMU-related function declarations in
asm/mm.h and the declaration of dump_pt_walk() in asm/page.h to
asm/mmu/mm.h.

Also modify the build system (Makefiles in this case) to pick up the
above-mentioned code changes.

Take the opportunity to fix the coding style of in-code comments
where possible, and drop unnecessary #include directives from the
original arm/mm.c.

Signed-off-by: Henry Wang <Henry.Wang@arm.com>
Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v6:
- Rework the original patch
  "[v5,07/13] xen/arm: Extract MMU-specific code"
---
 xen/arch/arm/arm32/Makefile       |   1 +
 xen/arch/arm/arm32/mmu/Makefile   |   1 +
 xen/arch/arm/arm32/mmu/mm.c       |  31 +++
 xen/arch/arm/arm64/Makefile       |   1 -
 xen/arch/arm/arm64/mmu/Makefile   |   1 +
 xen/arch/arm/arm64/{ => mmu}/mm.c |  37 +++
 xen/arch/arm/include/asm/mm.h     |  22 +-
 xen/arch/arm/include/asm/mmu/mm.h |  47 ++++
 xen/arch/arm/include/asm/page.h   |  15 --
 xen/arch/arm/mm.c                 | 381 ------------------------------
 xen/arch/arm/mmu/Makefile         |   1 +
 xen/arch/arm/mmu/setup.c          | 345 +++++++++++++++++++++++++++
 12 files changed, 470 insertions(+), 413 deletions(-)
 create mode 100644 xen/arch/arm/arm32/mmu/Makefile
 create mode 100644 xen/arch/arm/arm32/mmu/mm.c
 rename xen/arch/arm/arm64/{ => mmu}/mm.c (75%)
 create mode 100644 xen/arch/arm/include/asm/mmu/mm.h
 create mode 100644 xen/arch/arm/mmu/setup.c

Comments

Ayan Kumar Halder Sept. 7, 2023, 11:34 a.m. UTC | #1
Hi Henry,

On 28/08/2023 02:32, Henry Wang wrote:
> [...]
> diff --git a/xen/arch/arm/include/asm/mmu/mm.h b/xen/arch/arm/include/asm/mmu/mm.h
> new file mode 100644
> index 0000000000..5e3b14519b
> --- /dev/null
> +++ b/xen/arch/arm/include/asm/mmu/mm.h
> @@ -0,0 +1,47 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +#ifndef __ARM_MMU_MM_H__
> +#define __ARM_MMU_MM_H__
> +
> +extern mfn_t directmap_mfn_start, directmap_mfn_end;

As you are declaring them as MMU-specific, you also need this change:

diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 89ecb54be2..19b60c5d1b 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -670,7 +670,7 @@ void __init populate_boot_allocator(void)

              s = bootinfo.reserved_mem.bank[i].start;
              e = s + bootinfo.reserved_mem.bank[i].size;
-#ifdef CONFIG_ARM_32
+#if (CONFIG_ARM_32 && CONFIG_MMU)
              /* Avoid the xenheap, note that the xenheap cannot across a bank */
              if ( s <= mfn_to_maddr(directmap_mfn_start) &&
                   e >= mfn_to_maddr(directmap_mfn_end) )
@@ -708,7 +708,7 @@ void __init populate_boot_allocator(void)
              if ( e > bank_end )
                  e = bank_end;

-#ifdef CONFIG_ARM_32
+#if (CONFIG_ARM_32 && CONFIG_MMU)
              /* Avoid the xenheap */
              if ( s < mfn_to_maddr(directmap_mfn_end) &&
                   mfn_to_maddr(directmap_mfn_start) < e )

So that directmap_mfn_end and directmap_mfn_start are used only when the
MMU is enabled.

- Ayan

Henry Wang Sept. 8, 2023, 1:31 a.m. UTC | #2
Hi Ayan,

> On Sep 7, 2023, at 19:34, Ayan Kumar Halder <ayankuma@amd.com> wrote:
> 
> Hi Henry,
> 
>> +
>> +extern mfn_t directmap_mfn_start, directmap_mfn_end;
> 
> As you are declaring them as MMU-specific, you also need this change:
> 
> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index 89ecb54be2..19b60c5d1b 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -670,7 +670,7 @@ void __init populate_boot_allocator(void)
> 
>              s = bootinfo.reserved_mem.bank[i].start;
>              e = s + bootinfo.reserved_mem.bank[i].size;
> -#ifdef CONFIG_ARM_32
> +#if (CONFIG_ARM_32 && CONFIG_MMU)
>              /* Avoid the xenheap, note that the xenheap cannot across a bank */
>              if ( s <= mfn_to_maddr(directmap_mfn_start) &&
>                   e >= mfn_to_maddr(directmap_mfn_end) )
> @@ -708,7 +708,7 @@ void __init populate_boot_allocator(void)
>              if ( e > bank_end )
>                  e = bank_end;
> 
> -#ifdef CONFIG_ARM_32
> +#if (CONFIG_ARM_32 && CONFIG_MMU)
>              /* Avoid the xenheap */
>              if ( s < mfn_to_maddr(directmap_mfn_end) &&
>                   mfn_to_maddr(directmap_mfn_start) < e )
> 
> So that directmap_mfn_end and directmap_mfn_start are used only when the MMU is enabled.

I am not 100% sure about this, because currently there is no MPU code at
all, indicating all of setup.c is MMU-specific. In this case adding “&& CONFIG_MMU”
seems a little bit redundant to me. But I agree you have a point, and it is correct
that when the MPU code is in, this “directmap” part should be gated with
CONFIG_MMU (or maybe the code should be split between arm32/arm64 into different
helpers to avoid #ifdef). Hence I would prefer to do this change when the MPU code is added.

Let’s see what the maintainers will say. I am happy to do the change once we have
an agreement.

Kind regards,
Henry

> 
> - Ayan
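
(For illustration only: a minimal sketch, not part of this series, of the
#ifdef-free alternative Henry mentions above. The helper name
range_overlaps_xenheap() is hypothetical; an arm32 MMU build would supply the
real check while other builds supply a stub, keeping the call sites in
populate_boot_allocator() free of #ifdef.)

    #if defined(CONFIG_ARM_32) && defined(CONFIG_MMU)
    /* arm32 MMU builds must carve the xenheap out of the boot allocator. */
    static inline bool range_overlaps_xenheap(paddr_t s, paddr_t e)
    {
        return s < mfn_to_maddr(directmap_mfn_end) &&
               mfn_to_maddr(directmap_mfn_start) < e;
    }
    #else
    /* arm64 (and a future MPU build) have no separate xenheap to avoid. */
    static inline bool range_overlaps_xenheap(paddr_t s, paddr_t e)
    {
        return false;
    }
    #endif

    /* A call site in populate_boot_allocator() then needs no gate: */
    if ( range_overlaps_xenheap(s, e) )
    {
        /* ... skip or split the range around the xenheap ... */
    }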
Stefano Stabellini Sept. 8, 2023, 5:08 p.m. UTC | #3
On Fri, 8 Sep 2023, Henry Wang wrote:
> Hi Ayan,
> 
> > On Sep 7, 2023, at 19:34, Ayan Kumar Halder <ayankuma@amd.com> wrote:
> > 
> > Hi Henry,
> > 
> >> +
> >> +extern mfn_t directmap_mfn_start, directmap_mfn_end;
> > 
> > As you are declaring them as MMU-specific, you also need this change:
> > 
> > diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> > index 89ecb54be2..19b60c5d1b 100644
> > --- a/xen/arch/arm/setup.c
> > +++ b/xen/arch/arm/setup.c
> > @@ -670,7 +670,7 @@ void __init populate_boot_allocator(void)
> > 
> >              s = bootinfo.reserved_mem.bank[i].start;
> >              e = s + bootinfo.reserved_mem.bank[i].size;
> > -#ifdef CONFIG_ARM_32
> > +#if (CONFIG_ARM_32 && CONFIG_MMU)
> >              /* Avoid the xenheap, note that the xenheap cannot across a bank */
> >              if ( s <= mfn_to_maddr(directmap_mfn_start) &&
> >                   e >= mfn_to_maddr(directmap_mfn_end) )
> > @@ -708,7 +708,7 @@ void __init populate_boot_allocator(void)
> >              if ( e > bank_end )
> >                  e = bank_end;
> > 
> > -#ifdef CONFIG_ARM_32
> > +#if (CONFIG_ARM_32 && CONFIG_MMU)
> >              /* Avoid the xenheap */
> >              if ( s < mfn_to_maddr(directmap_mfn_end) &&
> >                   mfn_to_maddr(directmap_mfn_start) < e )
> > 
> > So that directmap_mfn_end and directmap_mfn_start are used only when the MMU is enabled.
> 
> I am not 100% sure about this, because currently there is no MPU code at
> all, indicating all of setup.c is MMU-specific. In this case adding “&& CONFIG_MMU”
> seems a little bit redundant to me. But I agree you have a point, and it is correct
> that when the MPU code is in, this “directmap” part should be gated with
> CONFIG_MMU (or maybe the code should be split between arm32/arm64 into different
> helpers to avoid #ifdef). Hence I would prefer to do this change when the MPU code is added.
> 
> Let’s see what the maintainers will say. I am happy to do the change once we have
> an agreement.

It might be wiser to add && CONFIG_MMU when the MPU code is added in
case we decide to move it / shape it differently.
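
(One aside on the suggested gate, not raised in the thread: Kconfig options
are either defined to 1 or left entirely undefined, so when the gate is
eventually added, the conventional preprocessor spelling tests them with
defined():

    #if defined(CONFIG_ARM_32) && defined(CONFIG_MMU)
        /* xenheap avoidance, as in Ayan's suggestion above */
    #endif

The form "#if (CONFIG_ARM_32 && CONFIG_MMU)" also evaluates as intended,
since undefined identifiers are treated as 0 in #if expressions, but
defined() is the idiomatic spelling for config options.)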

Patch

diff --git a/xen/arch/arm/arm32/Makefile b/xen/arch/arm/arm32/Makefile
index 520fb42054..40a2b4803f 100644
--- a/xen/arch/arm/arm32/Makefile
+++ b/xen/arch/arm/arm32/Makefile
@@ -1,4 +1,5 @@ 
 obj-y += lib/
+obj-$(CONFIG_MMU) += mmu/
 
 obj-$(CONFIG_EARLY_PRINTK) += debug.o
 obj-y += domctl.o
diff --git a/xen/arch/arm/arm32/mmu/Makefile b/xen/arch/arm/arm32/mmu/Makefile
new file mode 100644
index 0000000000..b18cec4836
--- /dev/null
+++ b/xen/arch/arm/arm32/mmu/Makefile
@@ -0,0 +1 @@ 
+obj-y += mm.o
diff --git a/xen/arch/arm/arm32/mmu/mm.c b/xen/arch/arm/arm32/mmu/mm.c
new file mode 100644
index 0000000000..647baf4a81
--- /dev/null
+++ b/xen/arch/arm/arm32/mmu/mm.c
@@ -0,0 +1,31 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <xen/init.h>
+#include <asm/fixmap.h>
+
+/*
+ * Set up the direct-mapped xenheap:
+ * up to 1GB of contiguous, always-mapped memory.
+ */
+void __init setup_directmap_mappings(unsigned long base_mfn,
+                                     unsigned long nr_mfns)
+{
+    int rc;
+
+    rc = map_pages_to_xen(XENHEAP_VIRT_START, _mfn(base_mfn), nr_mfns,
+                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+    if ( rc )
+        panic("Unable to setup the directmap mappings.\n");
+
+    /* Record where the directmap is, for translation routines. */
+    directmap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index f89d5fb4fb..72161ff22e 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -11,7 +11,6 @@  obj-y += entry.o
 obj-y += head.o
 obj-y += insn.o
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
-obj-y += mm.o
 obj-y += smc.o
 obj-y += smpboot.o
 obj-$(CONFIG_ARM64_SVE) += sve.o sve-asm.o
diff --git a/xen/arch/arm/arm64/mmu/Makefile b/xen/arch/arm/arm64/mmu/Makefile
index 3340058c08..a8a750a3d0 100644
--- a/xen/arch/arm/arm64/mmu/Makefile
+++ b/xen/arch/arm/arm64/mmu/Makefile
@@ -1 +1,2 @@ 
 obj-y += head.o
+obj-y += mm.o
diff --git a/xen/arch/arm/arm64/mm.c b/xen/arch/arm/arm64/mmu/mm.c
similarity index 75%
rename from xen/arch/arm/arm64/mm.c
rename to xen/arch/arm/arm64/mmu/mm.c
index 78b7c7eb00..36073041ed 100644
--- a/xen/arch/arm/arm64/mm.c
+++ b/xen/arch/arm/arm64/mmu/mm.c
@@ -151,6 +151,43 @@  void __init switch_ttbr(uint64_t ttbr)
     update_identity_mapping(false);
 }
 
+/* Map the region in the directmap area. */
+void __init setup_directmap_mappings(unsigned long base_mfn,
+                                     unsigned long nr_mfns)
+{
+    int rc;
+
+    /* First call sets the directmap physical and virtual offset. */
+    if ( mfn_eq(directmap_mfn_start, INVALID_MFN) )
+    {
+        unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
+
+        directmap_mfn_start = _mfn(base_mfn);
+        directmap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
+        /*
+         * The base address may not be aligned to the first level
+         * size (e.g. 1GB when using 4KB pages). This would prevent
+         * superpage mappings for all the regions because the virtual
+         * address and machine address should both be suitably aligned.
+         *
+         * Prevent that by offsetting the start of the directmap virtual
+         * address.
+         */
+        directmap_virt_start = DIRECTMAP_VIRT_START +
+            (base_mfn - mfn_gb) * PAGE_SIZE;
+    }
+
+    if ( base_mfn < mfn_x(directmap_mfn_start) )
+        panic("cannot add directmap mapping at %lx below heap start %lx\n",
+              base_mfn, mfn_x(directmap_mfn_start));
+
+    rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
+                          _mfn(base_mfn), nr_mfns,
+                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
+    if ( rc )
+        panic("Unable to setup the directmap mappings.\n");
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index a66aa219b1..ded6d076c5 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -14,6 +14,12 @@ 
 # error "unknown ARM variant"
 #endif
 
+#if defined(CONFIG_MMU)
+# include <asm/mmu/mm.h>
+#else
+# error "Unknown memory management layout"
+#endif
+
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
 
@@ -170,13 +176,6 @@  extern uint64_t init_ttbr;
 
 extern paddr_t phys_offset;
 
-extern mfn_t directmap_mfn_start, directmap_mfn_end;
-extern vaddr_t directmap_virt_end;
-#ifdef CONFIG_ARM_64
-extern vaddr_t directmap_virt_start;
-extern unsigned long directmap_base_pdx;
-#endif
-
 #ifdef CONFIG_ARM_32
 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
 #define is_xen_heap_mfn(mfn) ({                                 \
@@ -199,7 +198,6 @@  extern unsigned long directmap_base_pdx;
 
 #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
 
-#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 /* PDX of the first page in the frame table. */
 extern unsigned long frametable_base_pdx;
 
@@ -209,19 +207,11 @@  extern unsigned long frametable_base_pdx;
 extern void setup_pagetables(unsigned long boot_phys_offset);
 /* Map FDT in boot pagetable */
 extern void *early_fdt_map(paddr_t fdt_paddr);
-/* Switch to a new root page-tables */
-extern void switch_ttbr(uint64_t ttbr);
 /* Remove early mappings */
 extern void remove_early_mappings(void);
 /* Allocate and initialise pagetables for a secondary CPU. Sets init_ttbr to the
  * new page table */
 extern int init_secondary_pagetables(int cpu);
-/*
- * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
- * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
- * For Arm64, map the region in the directmap area.
- */
-extern void setup_directmap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
 /* Map a frame table to cover physical addresses ps through pe */
 extern void setup_frametable_mappings(paddr_t ps, paddr_t pe);
 /* map a physical range in virtual memory */
diff --git a/xen/arch/arm/include/asm/mmu/mm.h b/xen/arch/arm/include/asm/mmu/mm.h
new file mode 100644
index 0000000000..5e3b14519b
--- /dev/null
+++ b/xen/arch/arm/include/asm/mmu/mm.h
@@ -0,0 +1,47 @@ 
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __ARM_MMU_MM_H__
+#define __ARM_MMU_MM_H__
+
+extern mfn_t directmap_mfn_start, directmap_mfn_end;
+extern vaddr_t directmap_virt_end;
+#ifdef CONFIG_ARM_64
+extern vaddr_t directmap_virt_start;
+extern unsigned long directmap_base_pdx;
+#endif
+
+#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
+
+/*
+ * Print a walk of a page table or p2m
+ *
+ * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
+ * addr is the PA or IPA to translate
+ * root_level is the starting level of the page table
+ *   (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0 )
+ * nr_root_tables is the number of concatenated tables at the root.
+ *   this can only be != 1 for P2M walks starting at the first or
+ *   subsequent level.
+ */
+void dump_pt_walk(paddr_t ttbr, paddr_t addr,
+                  unsigned int root_level,
+                  unsigned int nr_root_tables);
+
+/* Switch to a new root page-tables */
+extern void switch_ttbr(uint64_t ttbr);
+/*
+ * For Arm32, set up the direct-mapped xenheap: up to 1GB of contiguous,
+ * always-mapped memory. Base must be 32MB aligned and size a multiple of 32MB.
+ * For Arm64, map the region in the directmap area.
+ */
+extern void setup_directmap_mappings(unsigned long base_mfn, unsigned long nr_mfns);
+
+#endif /* __ARM_MMU_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
index 657c4b33db..ac65f0277a 100644
--- a/xen/arch/arm/include/asm/page.h
+++ b/xen/arch/arm/include/asm/page.h
@@ -257,21 +257,6 @@  static inline void write_pte(lpae_t *p, lpae_t pte)
 /* Flush the dcache for an entire page. */
 void flush_page_to_ram(unsigned long mfn, bool sync_icache);
 
-/*
- * Print a walk of a page table or p2m
- *
- * ttbr is the base address register (TTBR0_EL2 or VTTBR_EL2)
- * addr is the PA or IPA to translate
- * root_level is the starting level of the page table
- *   (e.g. TCR_EL2.SL0 or VTCR_EL2.SL0 )
- * nr_root_tables is the number of concatenated tables at the root.
- *   this can only be != 1 for P2M walks starting at the first or
- *   subsequent level.
- */
-void dump_pt_walk(paddr_t ttbr, paddr_t addr,
-                  unsigned int root_level,
-                  unsigned int nr_root_tables);
-
 /* Print a walk of the hypervisor's page tables for a virtual addr. */
 extern void dump_hyp_walk(vaddr_t addr);
 /* Print a walk of the p2m for a domain for a physical address. */
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 3ee74542ba..eeb65ca6bb 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -11,139 +11,19 @@ 
 #include <xen/domain_page.h>
 #include <xen/grant_table.h>
 #include <xen/guest_access.h>
-#include <xen/libfdt/libfdt.h>
 #include <xen/mm.h>
-#include <xen/sizes.h>
 
 #include <xsm/xsm.h>
 
-#include <asm/setup.h>
-
 #include <public/memory.h>
 
 /* Override macros from asm/page.h to make them work with mfn_t */
 #undef virt_to_mfn
 #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_virt
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
-
-/* Main runtime page tables */
-
-/*
- * For arm32 xen_pgtable are per-PCPU and are allocated before
- * bringing up each CPU. For arm64 xen_pgtable is common to all PCPUs.
- *
- * xen_second, xen_fixmap and xen_xenmap are always shared between all
- * PCPUs.
- */
-
-#ifdef CONFIG_ARM_64
-DEFINE_PAGE_TABLE(xen_pgtable);
-static DEFINE_PAGE_TABLE(xen_first);
-#define THIS_CPU_PGTABLE xen_pgtable
-#else
-/* Per-CPU pagetable pages */
-/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
-DEFINE_PER_CPU(lpae_t *, xen_pgtable);
-#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
-/* Root of the trie for cpu0, other CPU's PTs are dynamically allocated */
-DEFINE_PAGE_TABLE(cpu0_pgtable);
-#endif
-
-/* Common pagetable leaves */
-/* Second level page table used to cover Xen virtual address space */
-static DEFINE_PAGE_TABLE(xen_second);
-/* Third level page table used for fixmap */
-DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
-/*
- * Third level page table used to map Xen itself with the XN bit set
- * as appropriate.
- */
-static DEFINE_PAGE_TABLES(xen_xenmap, XEN_NR_ENTRIES(2));
-
-/* Non-boot CPUs use this to find the correct pagetables. */
-uint64_t init_ttbr;
-
-paddr_t phys_offset;
-
-/* Limits of the Xen heap */
-mfn_t directmap_mfn_start __read_mostly = INVALID_MFN_INITIALIZER;
-mfn_t directmap_mfn_end __read_mostly;
-vaddr_t directmap_virt_end __read_mostly;
-#ifdef CONFIG_ARM_64
-vaddr_t directmap_virt_start __read_mostly;
-unsigned long directmap_base_pdx __read_mostly;
-#endif
 
 unsigned long frametable_base_pdx __read_mostly;
 unsigned long frametable_virt_end __read_mostly;
 
-extern char __init_begin[], __init_end[];
-
-/* Checking VA memory layout alignment. */
-static void __init __maybe_unused build_assertions(void)
-{
-    /* 2MB aligned regions */
-    BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK);
-    BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
-    /* 1GB aligned regions */
-#ifdef CONFIG_ARM_32
-    BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK);
-#else
-    BUILD_BUG_ON(DIRECTMAP_VIRT_START & ~FIRST_MASK);
-#endif
-    /* Page table structure constraints */
-#ifdef CONFIG_ARM_64
-    /*
-     * The first few slots of the L0 table is reserved for the identity
-     * mapping. Check that none of the other regions are overlapping
-     * with it.
-     */
-#define CHECK_OVERLAP_WITH_IDMAP(virt) \
-    BUILD_BUG_ON(zeroeth_table_offset(virt) < IDENTITY_MAPPING_AREA_NR_L0)
-
-    CHECK_OVERLAP_WITH_IDMAP(XEN_VIRT_START);
-    CHECK_OVERLAP_WITH_IDMAP(VMAP_VIRT_START);
-    CHECK_OVERLAP_WITH_IDMAP(FRAMETABLE_VIRT_START);
-    CHECK_OVERLAP_WITH_IDMAP(DIRECTMAP_VIRT_START);
-#undef CHECK_OVERLAP_WITH_IDMAP
-#endif
-    BUILD_BUG_ON(first_table_offset(XEN_VIRT_START));
-#ifdef CONFIG_ARCH_MAP_DOMAIN_PAGE
-    BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK);
-#endif
-    /*
-     * The boot code expects the regions XEN_VIRT_START, FIXMAP_ADDR(0),
-     * BOOT_FDT_VIRT_START to use the same 0th (arm64 only) and 1st
-     * slot in the page tables.
-     */
-#define CHECK_SAME_SLOT(level, virt1, virt2) \
-    BUILD_BUG_ON(level##_table_offset(virt1) != level##_table_offset(virt2))
-
-#define CHECK_DIFFERENT_SLOT(level, virt1, virt2) \
-    BUILD_BUG_ON(level##_table_offset(virt1) == level##_table_offset(virt2))
-
-#ifdef CONFIG_ARM_64
-    CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, FIXMAP_ADDR(0));
-    CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, BOOT_FDT_VIRT_START);
-#endif
-    CHECK_SAME_SLOT(first, XEN_VIRT_START, FIXMAP_ADDR(0));
-    CHECK_SAME_SLOT(first, XEN_VIRT_START, BOOT_FDT_VIRT_START);
-
-    /*
-     * For arm32, the temporary mapping will re-use the domheap
-     * first slot and the second slots will match.
-     */
-#ifdef CONFIG_ARM_32
-    CHECK_SAME_SLOT(first, TEMPORARY_XEN_VIRT_START, DOMHEAP_VIRT_START);
-    CHECK_DIFFERENT_SLOT(first, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
-    CHECK_SAME_SLOT(second, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
-#endif
-
-#undef CHECK_SAME_SLOT
-#undef CHECK_DIFFERENT_SLOT
-}
-
 void flush_page_to_ram(unsigned long mfn, bool sync_icache)
 {
     void *v = map_domain_page(_mfn(mfn));
@@ -163,222 +43,6 @@  void flush_page_to_ram(unsigned long mfn, bool sync_icache)
         invalidate_icache();
 }
 
-void * __init early_fdt_map(paddr_t fdt_paddr)
-{
-    /* We are using 2MB superpage for mapping the FDT */
-    paddr_t base_paddr = fdt_paddr & SECOND_MASK;
-    paddr_t offset;
-    void *fdt_virt;
-    uint32_t size;
-    int rc;
-
-    /*
-     * Check whether the physical FDT address is set and meets the minimum
-     * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be at
-     * least 8 bytes so that we always access the magic and size fields
-     * of the FDT header after mapping the first chunk, double check if
-     * that is indeed the case.
-     */
-    BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
-    if ( !fdt_paddr || fdt_paddr % MIN_FDT_ALIGN )
-        return NULL;
-
-    /* The FDT is mapped using 2MB superpage */
-    BUILD_BUG_ON(BOOT_FDT_VIRT_START % SZ_2M);
-
-    rc = map_pages_to_xen(BOOT_FDT_VIRT_START, maddr_to_mfn(base_paddr),
-                          SZ_2M >> PAGE_SHIFT,
-                          PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
-    if ( rc )
-        panic("Unable to map the device-tree.\n");
-
-
-    offset = fdt_paddr % SECOND_SIZE;
-    fdt_virt = (void *)BOOT_FDT_VIRT_START + offset;
-
-    if ( fdt_magic(fdt_virt) != FDT_MAGIC )
-        return NULL;
-
-    size = fdt_totalsize(fdt_virt);
-    if ( size > MAX_FDT_SIZE )
-        return NULL;
-
-    if ( (offset + size) > SZ_2M )
-    {
-        rc = map_pages_to_xen(BOOT_FDT_VIRT_START + SZ_2M,
-                              maddr_to_mfn(base_paddr + SZ_2M),
-                              SZ_2M >> PAGE_SHIFT,
-                              PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
-        if ( rc )
-            panic("Unable to map the device-tree\n");
-    }
-
-    return fdt_virt;
-}
-
-void __init remove_early_mappings(void)
-{
-    int rc;
-
-    /* destroy the _PAGE_BLOCK mapping */
-    rc = modify_xen_mappings(BOOT_FDT_VIRT_START,
-                             BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE,
-                             _PAGE_BLOCK);
-    BUG_ON(rc);
-}
-
-/*
- * After boot, Xen page-tables should not contain mapping that are both
- * Writable and eXecutables.
- *
- * This should be called on each CPU to enforce the policy.
- */
-static void xen_pt_enforce_wnx(void)
-{
-    WRITE_SYSREG(READ_SYSREG(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
-    /*
-     * The TLBs may cache SCTLR_EL2.WXN. So ensure it is synchronized
-     * before flushing the TLBs.
-     */
-    isb();
-    flush_xen_tlb_local();
-}
-
-/* Boot-time pagetable setup.
- * Changes here may need matching changes in head.S */
-void __init setup_pagetables(unsigned long boot_phys_offset)
-{
-    uint64_t ttbr;
-    lpae_t pte, *p;
-    int i;
-
-    phys_offset = boot_phys_offset;
-
-    arch_setup_page_tables();
-
-#ifdef CONFIG_ARM_64
-    pte = pte_of_xenaddr((uintptr_t)xen_first);
-    pte.pt.table = 1;
-    pte.pt.xn = 0;
-    xen_pgtable[zeroeth_table_offset(XEN_VIRT_START)] = pte;
-
-    p = (void *) xen_first;
-#else
-    p = (void *) cpu0_pgtable;
-#endif
-
-    /* Map xen second level page-table */
-    p[0] = pte_of_xenaddr((uintptr_t)(xen_second));
-    p[0].pt.table = 1;
-    p[0].pt.xn = 0;
-
-    /* Break up the Xen mapping into pages and protect them separately. */
-    for ( i = 0; i < XEN_NR_ENTRIES(3); i++ )
-    {
-        vaddr_t va = XEN_VIRT_START + (i << PAGE_SHIFT);
-
-        if ( !is_kernel(va) )
-            break;
-        pte = pte_of_xenaddr(va);
-        pte.pt.table = 1; /* third level mappings always have this bit set */
-        if ( is_kernel_text(va) || is_kernel_inittext(va) )
-        {
-            pte.pt.xn = 0;
-            pte.pt.ro = 1;
-        }
-        if ( is_kernel_rodata(va) )
-            pte.pt.ro = 1;
-        xen_xenmap[i] = pte;
-    }
-
-    /* Initialise xen second level entries ... */
-    /* ... Xen's text etc */
-    for ( i = 0; i < XEN_NR_ENTRIES(2); i++ )
-    {
-        vaddr_t va = XEN_VIRT_START + (i << XEN_PT_LEVEL_SHIFT(2));
-
-        pte = pte_of_xenaddr((vaddr_t)(xen_xenmap + i * XEN_PT_LPAE_ENTRIES));
-        pte.pt.table = 1;
-        xen_second[second_table_offset(va)] = pte;
-    }
-
-    /* ... Fixmap */
-    pte = pte_of_xenaddr((vaddr_t)xen_fixmap);
-    pte.pt.table = 1;
-    xen_second[second_table_offset(FIXMAP_ADDR(0))] = pte;
-
-#ifdef CONFIG_ARM_64
-    ttbr = (uintptr_t) xen_pgtable + phys_offset;
-#else
-    ttbr = (uintptr_t) cpu0_pgtable + phys_offset;
-#endif
-
-    switch_ttbr(ttbr);
-
-    xen_pt_enforce_wnx();
-
-#ifdef CONFIG_ARM_32
-    per_cpu(xen_pgtable, 0) = cpu0_pgtable;
-#endif
-}
-
-#ifdef CONFIG_ARM_32
-/*
- * Set up the direct-mapped xenheap:
- * up to 1GB of contiguous, always-mapped memory.
- */
-void __init setup_directmap_mappings(unsigned long base_mfn,
-                                     unsigned long nr_mfns)
-{
-    int rc;
-
-    rc = map_pages_to_xen(XENHEAP_VIRT_START, _mfn(base_mfn), nr_mfns,
-                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
-    if ( rc )
-        panic("Unable to setup the directmap mappings.\n");
-
-    /* Record where the directmap is, for translation routines. */
-    directmap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
-}
-#else /* CONFIG_ARM_64 */
-/* Map the region in the directmap area. */
-void __init setup_directmap_mappings(unsigned long base_mfn,
-                                     unsigned long nr_mfns)
-{
-    int rc;
-
-    /* First call sets the directmap physical and virtual offset. */
-    if ( mfn_eq(directmap_mfn_start, INVALID_MFN) )
-    {
-        unsigned long mfn_gb = base_mfn & ~((FIRST_SIZE >> PAGE_SHIFT) - 1);
-
-        directmap_mfn_start = _mfn(base_mfn);
-        directmap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
-        /*
-         * The base address may not be aligned to the first level
-         * size (e.g. 1GB when using 4KB pages). This would prevent
-         * superpage mappings for all the regions because the virtual
-         * address and machine address should both be suitably aligned.
-         *
-         * Prevent that by offsetting the start of the directmap virtual
-         * address.
-         */
-        directmap_virt_start = DIRECTMAP_VIRT_START +
-            (base_mfn - mfn_gb) * PAGE_SIZE;
-    }
-
-    if ( base_mfn < mfn_x(directmap_mfn_start) )
-        panic("cannot add directmap mapping at %lx below heap start %lx\n",
-              base_mfn, mfn_x(directmap_mfn_start));
-
-    rc = map_pages_to_xen((vaddr_t)__mfn_to_virt(base_mfn),
-                          _mfn(base_mfn), nr_mfns,
-                          PAGE_HYPERVISOR_RW | _PAGE_BLOCK);
-    if ( rc )
-        panic("Unable to setup the directmap mappings.\n");
-}
-#endif
-
 /* Map a frame table to cover physical addresses ps through pe */
 void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
 {
@@ -418,51 +82,6 @@  void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
     frametable_virt_end = FRAMETABLE_VIRT_START + (nr_pdxs * sizeof(struct page_info));
 }
 
-void *__init arch_vmap_virt_end(void)
-{
-    return (void *)(VMAP_VIRT_START + VMAP_VIRT_SIZE);
-}
-
-/* Release all __init and __initdata ranges to be reused */
-void free_init_memory(void)
-{
-    paddr_t pa = virt_to_maddr(__init_begin);
-    unsigned long len = __init_end - __init_begin;
-    uint32_t insn;
-    unsigned int i, nr = len / sizeof(insn);
-    uint32_t *p;
-    int rc;
-
-    rc = modify_xen_mappings((unsigned long)__init_begin,
-                             (unsigned long)__init_end, PAGE_HYPERVISOR_RW);
-    if ( rc )
-        panic("Unable to map RW the init section (rc = %d)\n", rc);
-
-    /*
-     * From now on, init will not be used for execution anymore,
-     * so nuke the instruction cache to remove entries related to init.
-     */
-    invalidate_icache_local();
-
-#ifdef CONFIG_ARM_32
-    /* udf instruction i.e (see A8.8.247 in ARM DDI 0406C.c) */
-    insn = 0xe7f000f0;
-#else
-    insn = AARCH64_BREAK_FAULT;
-#endif
-    p = (uint32_t *)__init_begin;
-    for ( i = 0; i < nr; i++ )
-        *(p + i) = insn;
-
-    rc = destroy_xen_mappings((unsigned long)__init_begin,
-                              (unsigned long)__init_end);
-    if ( rc )
-        panic("Unable to remove the init section (rc = %d)\n", rc);
-
-    init_domheap_pages(pa, pa + len);
-    printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
-}
-
 int steal_page(
     struct domain *d, struct page_info *page, unsigned int memflags)
 {
diff --git a/xen/arch/arm/mmu/Makefile b/xen/arch/arm/mmu/Makefile
index 0e82015ee1..98aea965df 100644
--- a/xen/arch/arm/mmu/Makefile
+++ b/xen/arch/arm/mmu/Makefile
@@ -1,2 +1,3 @@ 
 obj-y += pt.o
+obj-y += setup.o
 obj-y += smpboot.o
diff --git a/xen/arch/arm/mmu/setup.c b/xen/arch/arm/mmu/setup.c
new file mode 100644
index 0000000000..eb0dda00dc
--- /dev/null
+++ b/xen/arch/arm/mmu/setup.c
@@ -0,0 +1,345 @@ 
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * xen/arch/arm/mmu/setup.c
+ *
+ * Boot CPU MM bringup code for MMU systems.
+ */
+
+#include <xen/init.h>
+#include <xen/libfdt/libfdt.h>
+#include <xen/sizes.h>
+
+#include <asm/fixmap.h>
+
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_virt
+#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
+
+/* Main runtime page tables */
+
+/*
+ * For arm32, xen_pgtable is per-PCPU and is allocated before
+ * bringing up each CPU. For arm64, xen_pgtable is common to all PCPUs.
+ *
+ * xen_second, xen_fixmap and xen_xenmap are always shared between all
+ * PCPUs.
+ */
+
+#ifdef CONFIG_ARM_64
+DEFINE_PAGE_TABLE(xen_pgtable);
+static DEFINE_PAGE_TABLE(xen_first);
+#define THIS_CPU_PGTABLE xen_pgtable
+#else
+/* Per-CPU pagetable pages */
+/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
+DEFINE_PER_CPU(lpae_t *, xen_pgtable);
+#define THIS_CPU_PGTABLE this_cpu(xen_pgtable)
+/* Root of the trie for cpu0; other CPUs' page-tables are dynamically allocated */
+DEFINE_PAGE_TABLE(cpu0_pgtable);
+#endif
+
+/* Common pagetable leaves */
+/* Second level page table used to cover Xen virtual address space */
+static DEFINE_PAGE_TABLE(xen_second);
+/* Third level page table used for fixmap */
+DEFINE_BOOT_PAGE_TABLE(xen_fixmap);
+/*
+ * Third level page table used to map Xen itself with the XN bit set
+ * as appropriate.
+ */
+static DEFINE_PAGE_TABLES(xen_xenmap, XEN_NR_ENTRIES(2));
+
+/* Non-boot CPUs use this to find the correct pagetables. */
+uint64_t init_ttbr;
+
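+/*
+ * Offset between Xen's runtime virtual addresses and the physical
+ * address it was loaded at; set in setup_pagetables().
+ */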
+paddr_t phys_offset;
+
+/* Limits of the Xen heap */
+mfn_t directmap_mfn_start __read_mostly = INVALID_MFN_INITIALIZER;
+mfn_t directmap_mfn_end __read_mostly;
+vaddr_t directmap_virt_end __read_mostly;
+#ifdef CONFIG_ARM_64
+vaddr_t directmap_virt_start __read_mostly;
+unsigned long directmap_base_pdx __read_mostly;
+#endif
+
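+/* Markers delimiting the init section, provided by the linker script. */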
+extern char __init_begin[], __init_end[];
+
+/* Checking VA memory layout alignment. */
+static void __init __maybe_unused build_assertions(void)
+{
+    /* 2MB aligned regions */
+    BUILD_BUG_ON(XEN_VIRT_START & ~SECOND_MASK);
+    BUILD_BUG_ON(FIXMAP_ADDR(0) & ~SECOND_MASK);
+    /* 1GB aligned regions */
+#ifdef CONFIG_ARM_32
+    BUILD_BUG_ON(XENHEAP_VIRT_START & ~FIRST_MASK);
+#else
+    BUILD_BUG_ON(DIRECTMAP_VIRT_START & ~FIRST_MASK);
+#endif
+    /* Page table structure constraints */
+#ifdef CONFIG_ARM_64
+    /*
+     * The first few slots of the L0 table are reserved for the identity
+     * mapping. Check that none of the other regions overlap with it.
+     */
+#define CHECK_OVERLAP_WITH_IDMAP(virt) \
+    BUILD_BUG_ON(zeroeth_table_offset(virt) < IDENTITY_MAPPING_AREA_NR_L0)
+
+    CHECK_OVERLAP_WITH_IDMAP(XEN_VIRT_START);
+    CHECK_OVERLAP_WITH_IDMAP(VMAP_VIRT_START);
+    CHECK_OVERLAP_WITH_IDMAP(FRAMETABLE_VIRT_START);
+    CHECK_OVERLAP_WITH_IDMAP(DIRECTMAP_VIRT_START);
+#undef CHECK_OVERLAP_WITH_IDMAP
+#endif
+    BUILD_BUG_ON(first_table_offset(XEN_VIRT_START));
+#ifdef CONFIG_ARCH_MAP_DOMAIN_PAGE
+    BUILD_BUG_ON(DOMHEAP_VIRT_START & ~FIRST_MASK);
+#endif
+    /*
+     * The boot code expects the regions XEN_VIRT_START, FIXMAP_ADDR(0),
+     * BOOT_FDT_VIRT_START to use the same 0th (arm64 only) and 1st
+     * slot in the page tables.
+     */
+#define CHECK_SAME_SLOT(level, virt1, virt2) \
+    BUILD_BUG_ON(level##_table_offset(virt1) != level##_table_offset(virt2))
+
+#define CHECK_DIFFERENT_SLOT(level, virt1, virt2) \
+    BUILD_BUG_ON(level##_table_offset(virt1) == level##_table_offset(virt2))
+
+#ifdef CONFIG_ARM_64
+    CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, FIXMAP_ADDR(0));
+    CHECK_SAME_SLOT(zeroeth, XEN_VIRT_START, BOOT_FDT_VIRT_START);
+#endif
+    CHECK_SAME_SLOT(first, XEN_VIRT_START, FIXMAP_ADDR(0));
+    CHECK_SAME_SLOT(first, XEN_VIRT_START, BOOT_FDT_VIRT_START);
+
+    /*
+     * For arm32, the temporary mapping will re-use the domheap's
+     * first-level slot, and the second-level slots will match.
+     */
+#ifdef CONFIG_ARM_32
+    CHECK_SAME_SLOT(first, TEMPORARY_XEN_VIRT_START, DOMHEAP_VIRT_START);
+    CHECK_DIFFERENT_SLOT(first, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
+    CHECK_SAME_SLOT(second, XEN_VIRT_START, TEMPORARY_XEN_VIRT_START);
+#endif
+
+#undef CHECK_SAME_SLOT
+#undef CHECK_DIFFERENT_SLOT
+}
+
+void * __init early_fdt_map(paddr_t fdt_paddr)
+{
+    /* We are using a 2MB superpage for mapping the FDT. */
+    paddr_t base_paddr = fdt_paddr & SECOND_MASK;
+    paddr_t offset;
+    void *fdt_virt;
+    uint32_t size;
+    int rc;
+
+    /*
+     * Check whether the physical FDT address is set and meets the minimum
+     * alignment requirement. We rely on MIN_FDT_ALIGN being at least
+     * 8 bytes so that the magic and size fields of the FDT header are
+     * always accessible after mapping the first chunk; double-check
+     * that this is indeed the case.
+     */
+    BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+    if ( !fdt_paddr || fdt_paddr % MIN_FDT_ALIGN )
+        return NULL;
+
+    /* The FDT is mapped using a 2MB superpage. */
+    BUILD_BUG_ON(BOOT_FDT_VIRT_START % SZ_2M);
+
+    rc = map_pages_to_xen(BOOT_FDT_VIRT_START, maddr_to_mfn(base_paddr),
+                          SZ_2M >> PAGE_SHIFT,
+                          PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
+    if ( rc )
+        panic("Unable to map the device-tree.\n");
+
+
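+    /* The FDT may not start at the base of the superpage; find its offset. */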
+    offset = fdt_paddr % SECOND_SIZE;
+    fdt_virt = (void *)BOOT_FDT_VIRT_START + offset;
+
+    if ( fdt_magic(fdt_virt) != FDT_MAGIC )
+        return NULL;
+
+    size = fdt_totalsize(fdt_virt);
+    if ( size > MAX_FDT_SIZE )
+        return NULL;
+
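+    /* If the FDT crosses the first superpage, map a second 2MB chunk. */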
+    if ( (offset + size) > SZ_2M )
+    {
+        rc = map_pages_to_xen(BOOT_FDT_VIRT_START + SZ_2M,
+                              maddr_to_mfn(base_paddr + SZ_2M),
+                              SZ_2M >> PAGE_SHIFT,
+                              PAGE_HYPERVISOR_RO | _PAGE_BLOCK);
+        if ( rc )
+            panic("Unable to map the device-tree\n");
+    }
+
+    return fdt_virt;
+}
+
+void __init remove_early_mappings(void)
+{
+    int rc;
+
+    /* Destroy the _PAGE_BLOCK mapping. */
+    rc = modify_xen_mappings(BOOT_FDT_VIRT_START,
+                             BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE,
+                             _PAGE_BLOCK);
+    BUG_ON(rc);
+}
+
+/*
+ * After boot, Xen page-tables should not contain mappings that are both
+ * Writable and eXecutable.
+ *
+ * This should be called on each CPU to enforce the policy.
+ */
+static void xen_pt_enforce_wnx(void)
+{
+    WRITE_SYSREG(READ_SYSREG(SCTLR_EL2) | SCTLR_Axx_ELx_WXN, SCTLR_EL2);
+    /*
+     * The TLBs may cache SCTLR_EL2.WXN, so ensure it is synchronized
+     * before flushing the TLBs.
+     */
+    isb();
+    flush_xen_tlb_local();
+}
+
+/*
+ * Boot-time pagetable setup.
+ * Changes here may need matching changes in head.S
+ */
+void __init setup_pagetables(unsigned long boot_phys_offset)
+{
+    uint64_t ttbr;
+    lpae_t pte, *p;
+    int i;
+
+    phys_offset = boot_phys_offset;
+
+    arch_setup_page_tables();
+
+#ifdef CONFIG_ARM_64
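+    /* Link the zeroeth level entry to the first level table. */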
+    pte = pte_of_xenaddr((uintptr_t)xen_first);
+    pte.pt.table = 1;
+    pte.pt.xn = 0;
+    xen_pgtable[zeroeth_table_offset(XEN_VIRT_START)] = pte;
+
+    p = (void *) xen_first;
+#else
+    p = (void *) cpu0_pgtable;
+#endif
+
+    /* Map Xen's second level page-table. */
+    p[0] = pte_of_xenaddr((uintptr_t)(xen_second));
+    p[0].pt.table = 1;
+    p[0].pt.xn = 0;
+
+    /* Break up the Xen mapping into pages and protect them separately. */
+    for ( i = 0; i < XEN_NR_ENTRIES(3); i++ )
+    {
+        vaddr_t va = XEN_VIRT_START + (i << PAGE_SHIFT);
+
+        if ( !is_kernel(va) )
+            break;
+        pte = pte_of_xenaddr(va);
+        pte.pt.table = 1; /* Third level mappings always have this bit set. */
+        if ( is_kernel_text(va) || is_kernel_inittext(va) )
+        {
+            pte.pt.xn = 0;
+            pte.pt.ro = 1;
+        }
+        if ( is_kernel_rodata(va) )
+            pte.pt.ro = 1;
+        xen_xenmap[i] = pte;
+    }
+
+    /* Initialise Xen second level entries ... */
+    /* ... Xen's text etc. */
+    for ( i = 0; i < XEN_NR_ENTRIES(2); i++ )
+    {
+        vaddr_t va = XEN_VIRT_START + (i << XEN_PT_LEVEL_SHIFT(2));
+
+        pte = pte_of_xenaddr((vaddr_t)(xen_xenmap + i * XEN_PT_LPAE_ENTRIES));
+        pte.pt.table = 1;
+        xen_second[second_table_offset(va)] = pte;
+    }
+
+    /* ... Fixmap */
+    pte = pte_of_xenaddr((vaddr_t)xen_fixmap);
+    pte.pt.table = 1;
+    xen_second[second_table_offset(FIXMAP_ADDR(0))] = pte;
+
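+    /* Translate the root table's virtual address into a physical one. */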
+#ifdef CONFIG_ARM_64
+    ttbr = (uintptr_t) xen_pgtable + phys_offset;
+#else
+    ttbr = (uintptr_t) cpu0_pgtable + phys_offset;
+#endif
+
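+    /* Switch to the runtime page-tables set up above. */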
+    switch_ttbr(ttbr);
+
+    xen_pt_enforce_wnx();
+
+#ifdef CONFIG_ARM_32
+    per_cpu(xen_pgtable, 0) = cpu0_pgtable;
+#endif
+}
+
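+/* Return the end of Xen's VMAP virtual address region. */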
+void *__init arch_vmap_virt_end(void)
+{
+    return (void *)(VMAP_VIRT_START + VMAP_VIRT_SIZE);
+}
+
+/* Release all __init and __initdata ranges to be reused */
+void free_init_memory(void)
+{
+    paddr_t pa = virt_to_maddr(__init_begin);
+    unsigned long len = __init_end - __init_begin;
+    uint32_t insn;
+    unsigned int i, nr = len / sizeof(insn);
+    uint32_t *p;
+    int rc;
+
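+    /* Remap the init section read-write so it can be overwritten. */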
+    rc = modify_xen_mappings((unsigned long)__init_begin,
+                             (unsigned long)__init_end, PAGE_HYPERVISOR_RW);
+    if ( rc )
+        panic("Unable to map RW the init section (rc = %d)\n", rc);
+
+    /*
+     * From now on, init will no longer be used for execution,
+     * so nuke the instruction cache to remove entries related to init.
+     */
+    invalidate_icache_local();
+
+#ifdef CONFIG_ARM_32
+    /* udf instruction (see A8.8.247 in ARM DDI 0406C.c) */
+    insn = 0xe7f000f0;
+#else
+    insn = AARCH64_BREAK_FAULT;
+#endif
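+    /* Overwrite the init section with faulting instructions. */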
+    p = (uint32_t *)__init_begin;
+    for ( i = 0; i < nr; i++ )
+        *(p + i) = insn;
+
+    rc = destroy_xen_mappings((unsigned long)__init_begin,
+                              (unsigned long)__init_end);
+    if ( rc )
+        panic("Unable to remove the init section (rc = %d)\n", rc);
+
+    init_domheap_pages(pa, pa + len);
+    printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */