
[v2,17/40] xen/mpu: plumb virt/maddr/mfn conversion in MPU system

Message ID 20230113052914.3845596-18-Penny.Zheng@arm.com (mailing list archive)
State New, archived
Series xen/arm: Add Armv8-R64 MPU support to Xen - Part#1

Commit Message

Penny Zheng Jan. 13, 2023, 5:28 a.m. UTC
virt_to_maddr and maddr_to_virt are used widely in Xen code, so even
though there is no VMSA on an MPU system, we keep the interface names
so that the common code flow stays the same.

We move the existing virt/maddr conversion from mm.h to mm_mmu.h. The
MPU version of the virt/maddr conversion is trivial: it returns the
input address as the output.

We also override virt_to_mfn/mfn_to_virt in mm_mpu.c, in the same way
as is done in mm_mmu.c.

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
---
 xen/arch/arm/include/asm/mm.h     | 26 --------------------------
 xen/arch/arm/include/asm/mm_mmu.h | 26 ++++++++++++++++++++++++++
 xen/arch/arm/include/asm/mm_mpu.h | 13 +++++++++++++
 xen/arch/arm/mm_mpu.c             |  6 ++++++
 4 files changed, 45 insertions(+), 26 deletions(-)
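
To illustrate the intent (a hypothetical sketch, not part of the patch): common
code that round-trips through these helpers keeps exactly the same flow on both
systems; only the definitions pulled in via mm_mmu.h or mm_mpu.h differ. The
helper below and its placement are illustrative only.

#include <xen/lib.h>
#include <xen/mm.h>
#include <asm/mm.h>

/* Hypothetical caller in common Xen code -- unchanged by this patch. */
static void example_round_trip(void)
{
    void *va = alloc_xenheap_page();   /* virtual address of a fresh page */
    paddr_t pa;

    if ( !va )
        return;

    pa = virt_to_maddr(va);            /* MMU: PAR/directmap based lookup;
                                        * MPU: identity, pa == (paddr_t)va */
    ASSERT(maddr_to_virt(pa) == va);   /* round-trips on both systems */

    free_xenheap_page(va);
}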

Comments

Julien Grall Feb. 5, 2023, 9:36 p.m. UTC | #1
Hi,

title: typo: s/convertion/conversion/

On 13/01/2023 05:28, Penny Zheng wrote:
> virt_to_maddr and maddr_to_virt are used widely in Xen code. So
> even there is no VMSA in MPU system, we keep the interface name to
> stay the same code flow.
> 
> We move the existing virt/maddr convertion from mm.h to mm_mmu.h.
> And the MPU version of virt/maddr convertion is simple, returning

ditto.

> the input address as the output.
> 
> We should overide virt_to_mfn/mfn_to_virt in source file mm_mpu.c the

ditto: s/overide/override/

> same way in mm_mmu.c.
> 
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> ---
>   xen/arch/arm/include/asm/mm.h     | 26 --------------------------
>   xen/arch/arm/include/asm/mm_mmu.h | 26 ++++++++++++++++++++++++++
>   xen/arch/arm/include/asm/mm_mpu.h | 13 +++++++++++++
>   xen/arch/arm/mm_mpu.c             |  6 ++++++
>   4 files changed, 45 insertions(+), 26 deletions(-)
> 
> diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
> index 9b4c07d965..e29158028a 100644
> --- a/xen/arch/arm/include/asm/mm.h
> +++ b/xen/arch/arm/include/asm/mm.h
> @@ -250,32 +250,6 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>   /* Page-align address and convert to frame number format */
>   #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
>   
> -static inline paddr_t __virt_to_maddr(vaddr_t va)
> -{
> -    uint64_t par = va_to_par(va);
> -    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
> -}
> -#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))
> -
> -#ifdef CONFIG_ARM_32
> -static inline void *maddr_to_virt(paddr_t ma)
> -{
> -    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
> -    ma -= mfn_to_maddr(directmap_mfn_start);
> -    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
> -}
> -#else
> -static inline void *maddr_to_virt(paddr_t ma)
> -{
> -    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
> -           (DIRECTMAP_SIZE >> PAGE_SHIFT));
> -    return (void *)(XENHEAP_VIRT_START -
> -                    (directmap_base_pdx << PAGE_SHIFT) +
> -                    ((ma & ma_va_bottom_mask) |
> -                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
> -}
> -#endif
> -
>   /*
>    * Translate a guest virtual address to a machine address.
>    * Return the fault information if the translation has failed else 0.
> diff --git a/xen/arch/arm/include/asm/mm_mmu.h b/xen/arch/arm/include/asm/mm_mmu.h
> index a5e63d8af8..6d7e5ddde7 100644
> --- a/xen/arch/arm/include/asm/mm_mmu.h
> +++ b/xen/arch/arm/include/asm/mm_mmu.h
> @@ -23,6 +23,32 @@ extern uint64_t init_ttbr;
>   extern void setup_directmap_mappings(unsigned long base_mfn,
>                                        unsigned long nr_mfns);
>   
> +static inline paddr_t __virt_to_maddr(vaddr_t va)
> +{
> +    uint64_t par = va_to_par(va);
> +    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
> +}
> +#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))
> +
> +#ifdef CONFIG_ARM_32
> +static inline void *maddr_to_virt(paddr_t ma)
> +{
> +    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
> +    ma -= mfn_to_maddr(directmap_mfn_start);
> +    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
> +}
> +#else
> +static inline void *maddr_to_virt(paddr_t ma)
> +{
> +    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
> +           (DIRECTMAP_SIZE >> PAGE_SHIFT));
> +    return (void *)(XENHEAP_VIRT_START -
> +                    (directmap_base_pdx << PAGE_SHIFT) +
> +                    ((ma & ma_va_bottom_mask) |
> +                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
> +}
> +#endif
> +
>   #endif /* __ARCH_ARM_MM_MMU__ */
>   
>   /*
> diff --git a/xen/arch/arm/include/asm/mm_mpu.h b/xen/arch/arm/include/asm/mm_mpu.h
> index 1f3cff7743..3a4b07f187 100644
> --- a/xen/arch/arm/include/asm/mm_mpu.h
> +++ b/xen/arch/arm/include/asm/mm_mpu.h
> @@ -4,6 +4,19 @@
>   
>   #define setup_mm_mappings(boot_phys_offset) ((void)(boot_phys_offset))
>   
> +static inline paddr_t __virt_to_maddr(vaddr_t va)
> +{
> +    /* In MPU system, VA == PA. */
> +    return (paddr_t)va;
> +}
> +#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))

This define is exactly the same as the MMU version. Can it be 
implemented in mm.h?
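
A minimal sketch of this suggestion (illustrative, not a committed interface):
mm.h would carry the common wrapper once, while mm_mmu.h and mm_mpu.h each keep
only their own __virt_to_maddr()/maddr_to_virt() implementations.

/* xen/arch/arm/include/asm/mm.h -- common wrapper, defined only once. */
#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))

/* mm_mmu.h keeps the PAR/directmap based __virt_to_maddr();
 * mm_mpu.h keeps the identity (VA == PA) version. */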

> +
> +static inline void *maddr_to_virt(paddr_t ma)
> +{
> +    /* In MPU system, VA == PA. */
> +    return (void *)ma;
> +}
> +
>   #endif /* __ARCH_ARM_MM_MPU__ */
>   
>   /*
> diff --git a/xen/arch/arm/mm_mpu.c b/xen/arch/arm/mm_mpu.c
> index 87a12042cc..c9e17ab6da 100644
> --- a/xen/arch/arm/mm_mpu.c
> +++ b/xen/arch/arm/mm_mpu.c
> @@ -29,6 +29,12 @@
>   pr_t __aligned(PAGE_SIZE) __section(".data.page_aligned")
>        xen_mpumap[ARM_MAX_MPU_MEMORY_REGIONS];
>   
> +/* Override macros from asm/page.h to make them work with mfn_t */
> +#undef virt_to_mfn
> +#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
> +#undef mfn_to_virt
> +#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))

They should be implemented when you need them.

> +
>   /* Index into MPU memory region map for fixed regions, ascending from zero. */
>   uint64_t __ro_after_init next_fixed_region_idx;
>   /*

Cheers,

Julien

Patch

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index 9b4c07d965..e29158028a 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -250,32 +250,6 @@  static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 /* Page-align address and convert to frame number format */
 #define paddr_to_pfn_aligned(paddr)    paddr_to_pfn(PAGE_ALIGN(paddr))
 
-static inline paddr_t __virt_to_maddr(vaddr_t va)
-{
-    uint64_t par = va_to_par(va);
-    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
-}
-#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))
-
-#ifdef CONFIG_ARM_32
-static inline void *maddr_to_virt(paddr_t ma)
-{
-    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
-    ma -= mfn_to_maddr(directmap_mfn_start);
-    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
-}
-#else
-static inline void *maddr_to_virt(paddr_t ma)
-{
-    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
-           (DIRECTMAP_SIZE >> PAGE_SHIFT));
-    return (void *)(XENHEAP_VIRT_START -
-                    (directmap_base_pdx << PAGE_SHIFT) +
-                    ((ma & ma_va_bottom_mask) |
-                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
-}
-#endif
-
 /*
  * Translate a guest virtual address to a machine address.
  * Return the fault information if the translation has failed else 0.
diff --git a/xen/arch/arm/include/asm/mm_mmu.h b/xen/arch/arm/include/asm/mm_mmu.h
index a5e63d8af8..6d7e5ddde7 100644
--- a/xen/arch/arm/include/asm/mm_mmu.h
+++ b/xen/arch/arm/include/asm/mm_mmu.h
@@ -23,6 +23,32 @@  extern uint64_t init_ttbr;
 extern void setup_directmap_mappings(unsigned long base_mfn,
                                      unsigned long nr_mfns);
 
+static inline paddr_t __virt_to_maddr(vaddr_t va)
+{
+    uint64_t par = va_to_par(va);
+    return (par & PADDR_MASK & PAGE_MASK) | (va & ~PAGE_MASK);
+}
+#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))
+
+#ifdef CONFIG_ARM_32
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
+    ma -= mfn_to_maddr(directmap_mfn_start);
+    return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
+}
+#else
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
+           (DIRECTMAP_SIZE >> PAGE_SHIFT));
+    return (void *)(XENHEAP_VIRT_START -
+                    (directmap_base_pdx << PAGE_SHIFT) +
+                    ((ma & ma_va_bottom_mask) |
+                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
+}
+#endif
+
 #endif /* __ARCH_ARM_MM_MMU__ */
 
 /*
diff --git a/xen/arch/arm/include/asm/mm_mpu.h b/xen/arch/arm/include/asm/mm_mpu.h
index 1f3cff7743..3a4b07f187 100644
--- a/xen/arch/arm/include/asm/mm_mpu.h
+++ b/xen/arch/arm/include/asm/mm_mpu.h
@@ -4,6 +4,19 @@ 
 
 #define setup_mm_mappings(boot_phys_offset) ((void)(boot_phys_offset))
 
+static inline paddr_t __virt_to_maddr(vaddr_t va)
+{
+    /* In MPU system, VA == PA. */
+    return (paddr_t)va;
+}
+#define virt_to_maddr(va)   __virt_to_maddr((vaddr_t)(va))
+
+static inline void *maddr_to_virt(paddr_t ma)
+{
+    /* In MPU system, VA == PA. */
+    return (void *)ma;
+}
+
 #endif /* __ARCH_ARM_MM_MPU__ */
 
 /*
diff --git a/xen/arch/arm/mm_mpu.c b/xen/arch/arm/mm_mpu.c
index 87a12042cc..c9e17ab6da 100644
--- a/xen/arch/arm/mm_mpu.c
+++ b/xen/arch/arm/mm_mpu.c
@@ -29,6 +29,12 @@ 
 pr_t __aligned(PAGE_SIZE) __section(".data.page_aligned")
      xen_mpumap[ARM_MAX_MPU_MEMORY_REGIONS];
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef virt_to_mfn
+#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
+#undef mfn_to_virt
+#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
+
 /* Index into MPU memory region map for fixed regions, ascending from zero. */
 uint64_t __ro_after_init next_fixed_region_idx;
 /*