[v3,36/52] xen/mpu: implement ioremap_xxx in MPU

Message ID 20230626033443.2943270-37-Penny.Zheng@arm.com (mailing list archive)
State New, archived
Series xen/arm: Add Armv8-R64 MPU support to Xen - Part#1

Commit Message

Penny Zheng June 26, 2023, 3:34 a.m. UTC
A set of functions, ioremap_xxx, is designed to map device memory, or
to remap part of memory temporarily for a short-time special purpose,
like using ioremap_wc to temporarily remap the guest kernel
non-cacheable while copying it to guest memory.

As virtual address translation is not supported in the MPU, and we
always follow the rule of "map on demand" in the MPU, we implement the
MPU version of ioremap_xxx by mapping the memory with a transient MPU
memory region.
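
As an illustration, a typical caller would look like this (a sketch
with a hypothetical call site and names, not code from this patch):

    /* Temporarily remap a guest kernel non-cacheable to copy it. */
    void *dst = ioremap_wc(kernel_paddr, kernel_size);

    if ( dst )
    {
        memcpy(dst, kernel_src, kernel_size);
        iounmap(dst);   /* removes the transient MPU region again */
    }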

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Signed-off-by: Wei Chen <wei.chen@arm.com>
---
v3:
- adapt to the new rule of "map on demand"
---
 xen/arch/arm/include/asm/arm64/mpu.h |   4 +
 xen/arch/arm/include/asm/mm.h        |   6 +
 xen/arch/arm/mpu/mm.c                | 185 +++++++++++++++++++++++++++
 3 files changed, 195 insertions(+)

Comments

Ayan Kumar Halder July 5, 2023, 2:01 p.m. UTC | #1
Hi Penny,

On 26/06/2023 04:34, Penny Zheng wrote:
>
> A set of functions, ioremap_xxx, is designed to map device memory, or
> to remap part of memory temporarily for a short-time special purpose,
> like using ioremap_wc to temporarily remap the guest kernel
> non-cacheable while copying it to guest memory.
>
> As virtual address translation is not supported in the MPU, and we
> always follow the rule of "map on demand" in the MPU, we implement the
> MPU version of ioremap_xxx by mapping the memory with a transient MPU
> memory region.
>
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> ---
> v3:
> - adapt to the new rule of "map on demand"
> ---
>   xen/arch/arm/include/asm/arm64/mpu.h |   4 +
>   xen/arch/arm/include/asm/mm.h        |   6 +
>   xen/arch/arm/mpu/mm.c                | 185 +++++++++++++++++++++++++++
>   3 files changed, 195 insertions(+)
>
> diff --git a/xen/arch/arm/include/asm/arm64/mpu.h b/xen/arch/arm/include/asm/arm64/mpu.h
> index aee7947223..c5e69f239a 100644
> --- a/xen/arch/arm/include/asm/arm64/mpu.h
> +++ b/xen/arch/arm/include/asm/arm64/mpu.h
> @@ -121,6 +121,10 @@ static inline bool region_is_valid(pr_t *pr)
>       return pr->prlar.reg.en;
>   }
>
> +static inline bool region_is_transient(pr_t *pr)
> +{
> +    return pr->prlar.reg.tran;
> +}
>   #endif /* __ASSEMBLY__ */
>
>   #endif /* __ARM64_MPU_H__ */
> diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
> index cffbf8a595..0352182d99 100644
> --- a/xen/arch/arm/include/asm/mm.h
> +++ b/xen/arch/arm/include/asm/mm.h
> @@ -227,6 +227,7 @@ void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int attributes);
>   extern int map_staticmem_pages_to_xen(paddr_t start, paddr_t end);
>   extern int unmap_staticmem_pages_to_xen(paddr_t start, paddr_t end);
>
> +#ifndef CONFIG_HAS_MPU
>   static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
>   {
>       return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
> @@ -241,6 +242,11 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
>   {
>       return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
>   }
> +#else
> +extern void __iomem *ioremap_nocache(paddr_t start, size_t len);
> +extern void __iomem *ioremap_cache(paddr_t start, size_t len);
> +extern void __iomem *ioremap_wc(paddr_t start, size_t len);
> +#endif
>
>   /* XXX -- account for base */
>   #define mfn_valid(mfn)        ({                                              \
> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
> index 9d5c1da39c..3bb1a5c7c4 100644
> --- a/xen/arch/arm/mpu/mm.c
> +++ b/xen/arch/arm/mpu/mm.c
> @@ -624,6 +624,191 @@ int __init unmap_staticmem_pages_to_xen(paddr_t start, paddr_t end)
>       return xen_mpumap_update(start, end, 0);
>   }
>
> +/*
> + * Check whether memory range [pa, pa + len) is mapped in Xen MPU
> + * memory mapping table xen_mpumap.
> + *
> + * If it is mapped, the associated index will be returned.
> + * If it is not mapped, INVALID_REGION_IDX will be returned.
> + */
> +static uint8_t is_mm_range_mapped(paddr_t pa, paddr_t len)
> +{
> +    int rc;
> +    uint8_t idx;
> +
> +    rc = mpumap_contain_region(xen_mpumap, max_xen_mpumap, pa, pa + len - 1,
> +                               &idx);
> +    if ( (rc == MPUMAP_REGION_FOUND) || (rc == MPUMAP_REGION_INCLUSIVE) )
> +        return idx;
> +
> +    if ( rc == MPUMAP_REGION_OVERLAP )
> +         panic("mpu: can not deal with overlapped MPU memory region\n");
> +    /* Not mapped */
> +    return INVALID_REGION_IDX;
> +}
> +
> +static bool is_mm_attr_match(pr_t *region, unsigned int attributes)
> +{
> +    if ( region->prbar.reg.ap != PAGE_AP_MASK(attributes) )
> +    {
> +        printk(XENLOG_WARNING "region permission is not matched (0x%x -> 0x%x)\n",
> +               region->prbar.reg.ap, PAGE_AP_MASK(attributes));
> +        return false;
> +    }
> +
> +    if ( region->prbar.reg.xn != PAGE_XN_MASK(attributes) )
> +    {
> +        printk(XENLOG_WARNING "region execution permission is not matched (0x%x -> 0x%x)\n",
> +               region->prbar.reg.xn, PAGE_XN_MASK(attributes));
> +        return false;
> +    }
> +
> +    if ( region->prlar.reg.ai != PAGE_AI_MASK(attributes) )
> +    {
> +        printk(XENLOG_WARNING "region memory attributes is not matched (0x%x -> 0x%x)\n",
> +               region->prlar.reg.ai, PAGE_AI_MASK(attributes));
> +        return false;
> +    }
> +
> +    return true;
> +}
> +
> +/*
> + * Check whether memory range [pa, pa + len) is mapped with memory
> + * attributes #attr in Xen MPU memory mapping table xen_mpumap.
> + *
> + * If it is mapped but with different memory attributes, Errno -EINVAL
> + * will be returned.
> + * If it is not mapped at all, Errno -ENOENT will be returned.
> + */
> +static int is_mm_range_mapped_with_attr(paddr_t pa, paddr_t len,
> +                                        unsigned int attr)
> +{
> +    uint8_t idx;
> +
> +    idx = is_mm_range_mapped(pa, len);
> +    if ( idx != INVALID_REGION_IDX )
> +    {
> +        pr_t *region;
> +
> +        region = &xen_mpumap[idx];
> +        if ( !is_mm_attr_match(region, attr) )
> +            return -EINVAL;
> +
> +        return 0;
> +    }
> +
> +    return -ENOENT;
> +}
> +
> +/*
> + * map_mm_range shall work with unmap_mm_range to map a chunk
> + * of memory with a transient MPU memory region for a short period of time.
> + */
> +static void *map_mm_range(paddr_t pa, size_t len, unsigned int attributes)
> +{
> +    if ( xen_mpumap_update(pa, pa + len, attributes | _PAGE_TRANSIENT) )
> +        printk(XENLOG_ERR "Failed to map_mm_range 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
> +               pa, pa + len);
Don't you want to return NULL or something from here ?
> +
> +    return maddr_to_virt(pa);
> +}
> +
> +static void unmap_mm_range(paddr_t pa)
> +{
> +    uint8_t idx;
> +
> +    /*
> +     * The mapping size in map_mm_range is at least PAGE_SIZE.
> +     * Find the MPU memory region mapped through map_mm_range, and the associated
> +     * idx will be returned.
> +     */
> +    idx = is_mm_range_mapped(pa, PAGE_SIZE);
> +    if ( idx == INVALID_REGION_IDX )
> +    {
> +        printk(XENLOG_ERR "Failed to unmap_mm_range MPU memory region at 0x%"PRIpaddr"\n",
> +               pa);
> +        return;
> +    }
> +
> +    if ( !region_is_transient(&xen_mpumap[idx]) )
> +    {
> +        printk(XENLOG_WARNING "Failed to unmap MPU memory region at 0x%"PRIpaddr"\n, as it is not transient\n",
> +               pa);
> +        return;
> +    }

Does this mean you only allow unmapping of transient memory ?

So, is the non transient memory always expected to be mapped throughout 
the lifetime of the system ?

> +
> +    /* Disable MPU memory region and clear the corresponding entry in xen_mpumap */
> +    control_mpu_region_from_index(idx, false);
> +}
> +
> +/*
> + * It works with "iounmap" as a pair to temporarily map a chunk of memory
> + * with a transient MPU memory region, for short-time special access.
> + */
> +void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
> +{
> +    return map_mm_range(round_pgdown(pa), round_pgup(len), attributes);
> +}
> +
> +/* ioremap_nocache is normally used to map device memory */
> +void __iomem *ioremap_nocache(paddr_t start, size_t len)
> +{
> +    int rc;

For this function and others (ioremap_xxx()), don't we need to check if 
the memory is transient ?

- Ayan

> +
> +    /* Check whether it is already mapped as device memory */
> +    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
> +    if ( rc == -ENOENT )
> +        return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
> +    else if ( rc != 0 )
> +        return NULL;
> +
> +    /* Already mapped */
> +    return maddr_to_virt(start);
> +}
> +
> +/*
> + * ioremap_cache, which works with iounmap as a pair, is normally used to
> + * map a chunk of cacheable memory temporarily for a short-time special purpose.
> + */
> +void __iomem *ioremap_cache(paddr_t start, size_t len)
> +{
> +    int rc;
> +
> +    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR);
> +    if ( rc == -ENOENT )
> +        return ioremap_attr(start, len, PAGE_HYPERVISOR);
> +    else if ( rc != 0 )
> +        return NULL;
> +
> +    /* Already mapped */
> +    return maddr_to_virt(start);
> +}
> +
> +/*
> + * ioremap_wc, which works with iounmap as a pair, is normally used to
> + * map a chunk of non-cacheable memory temporarily for a short-time special
> + * purpose.
> + */
> +void __iomem *ioremap_wc(paddr_t start, size_t len)
> +{
> +    int rc;
> +
> +    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR_WC);
> +    if ( rc == -ENOENT )
> +        return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
> +    else if ( rc != 0 )
> +        return NULL;
> +
> +    /* Already mapped */
> +    return maddr_to_virt(start);
> +}
> +
> +void iounmap(void __iomem *va)
> +{
> +    unmap_mm_range(virt_to_maddr(va));
> +}
> +
>   /*
>    * Local variables:
>    * mode: C
> --
> 2.25.1
>
>
Penny Zheng July 13, 2023, 7:09 a.m. UTC | #2
Hi Ayan,

On 2023/7/5 22:01, Ayan Kumar Halder wrote:
> Hi Penny,
> 
> On 26/06/2023 04:34, Penny Zheng wrote:
>>
>> A set of functions, ioremap_xxx, is designed to map device memory,
>> or to remap part of memory temporarily for a short-time special
>> purpose, like using ioremap_wc to temporarily remap the guest kernel
>> non-cacheable while copying it to guest memory.
>>
>> As virtual address translation is not supported in the MPU, and we
>> always follow the rule of "map on demand" in the MPU, we implement
>> the MPU version of ioremap_xxx by mapping the memory with a transient
>> MPU memory region.
>>
>> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
>> Signed-off-by: Wei Chen <wei.chen@arm.com>
>> ---
>> v3:
>> - adapt to the new rule of "map on demand"
>> ---
>>   xen/arch/arm/include/asm/arm64/mpu.h |   4 +
>>   xen/arch/arm/include/asm/mm.h        |   6 +
>>   xen/arch/arm/mpu/mm.c                | 185 +++++++++++++++++++++++++++
>>   3 files changed, 195 insertions(+)
>>
>> diff --git a/xen/arch/arm/include/asm/arm64/mpu.h 
>> b/xen/arch/arm/include/asm/arm64/mpu.h
>> index aee7947223..c5e69f239a 100644
>> --- a/xen/arch/arm/include/asm/arm64/mpu.h
>> +++ b/xen/arch/arm/include/asm/arm64/mpu.h
>> @@ -121,6 +121,10 @@ static inline bool region_is_valid(pr_t *pr)
>>       return pr->prlar.reg.en;
>>   }
>>
>> +static inline bool region_is_transient(pr_t *pr)
>> +{
>> +    return pr->prlar.reg.tran;
>> +}
>>   #endif /* __ASSEMBLY__ */
>>
>>   #endif /* __ARM64_MPU_H__ */
>> diff --git a/xen/arch/arm/include/asm/mm.h 
>> b/xen/arch/arm/include/asm/mm.h
>> index cffbf8a595..0352182d99 100644
>> --- a/xen/arch/arm/include/asm/mm.h
>> +++ b/xen/arch/arm/include/asm/mm.h
>> @@ -227,6 +227,7 @@ void __iomem *ioremap_attr(paddr_t start, size_t 
>> len, unsigned int attributes);
>>   extern int map_staticmem_pages_to_xen(paddr_t start, paddr_t end);
>>   extern int unmap_staticmem_pages_to_xen(paddr_t start, paddr_t end);
>>
>> +#ifndef CONFIG_HAS_MPU
>>   static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
>>   {
>>       return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
>> @@ -241,6 +242,11 @@ static inline void __iomem *ioremap_wc(paddr_t 
>> start, size_t len)
>>   {
>>       return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
>>   }
>> +#else
>> +extern void __iomem *ioremap_nocache(paddr_t start, size_t len);
>> +extern void __iomem *ioremap_cache(paddr_t start, size_t len);
>> +extern void __iomem *ioremap_wc(paddr_t start, size_t len);
>> +#endif
>>
>>   /* XXX -- account for base */
>>   #define mfn_valid(mfn)        
>> ({                                              \
>> diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
>> index 9d5c1da39c..3bb1a5c7c4 100644
>> --- a/xen/arch/arm/mpu/mm.c
>> +++ b/xen/arch/arm/mpu/mm.c
>> @@ -624,6 +624,191 @@ int __init unmap_staticmem_pages_to_xen(paddr_t 
>> start, paddr_t end)
>>       return xen_mpumap_update(start, end, 0);
>>   }
>>
>> +/*
>> + * Check whether memory range [pa, pa + len) is mapped in Xen MPU
>> + * memory mapping table xen_mpumap.
>> + *
>> + * If it is mapped, the associated index will be returned.
>> + * If it is not mapped, INVALID_REGION_IDX will be returned.
>> + */
>> +static uint8_t is_mm_range_mapped(paddr_t pa, paddr_t len)
>> +{
>> +    int rc;
>> +    uint8_t idx;
>> +
>> +    rc = mpumap_contain_region(xen_mpumap, max_xen_mpumap, pa, pa + 
>> len - 1,
>> +                               &idx);
>> +    if ( (rc == MPUMAP_REGION_FOUND) || (rc == 
>> MPUMAP_REGION_INCLUSIVE) )
>> +        return idx;
>> +
>> +    if ( rc == MPUMAP_REGION_OVERLAP )
>> +         panic("mpu: can not deal with overlapped MPU memory region\n");
>> +    /* Not mapped */
>> +    return INVALID_REGION_IDX;
>> +}
>> +
>> +static bool is_mm_attr_match(pr_t *region, unsigned int attributes)
>> +{
>> +    if ( region->prbar.reg.ap != PAGE_AP_MASK(attributes) )
>> +    {
>> +        printk(XENLOG_WARNING "region permission is not matched (0x%x 
>> -> 0x%x)\n",
>> +               region->prbar.reg.ap, PAGE_AP_MASK(attributes));
>> +        return false;
>> +    }
>> +
>> +    if ( region->prbar.reg.xn != PAGE_XN_MASK(attributes) )
>> +    {
>> +        printk(XENLOG_WARNING "region execution permission is not 
>> matched (0x%x -> 0x%x)\n",
>> +               region->prbar.reg.xn, PAGE_XN_MASK(attributes));
>> +        return false;
>> +    }
>> +
>> +    if ( region->prlar.reg.ai != PAGE_AI_MASK(attributes) )
>> +    {
>> +        printk(XENLOG_WARNING "region memory attributes is not 
>> matched (0x%x -> 0x%x)\n",
>> +               region->prlar.reg.ai, PAGE_AI_MASK(attributes));
>> +        return false;
>> +    }
>> +
>> +    return true;
>> +}
>> +
>> +/*
>> + * Check whether memory range [pa, pa + len) is mapped with memory
>> + * attributes #attr in Xen MPU memory mapping table xen_mpumap.
>> + *
>> + * If it is mapped but with different memory attributes, Errno -EINVAL
>> + * will be returned.
>> + * If it is not mapped at all, Errno -ENOENT will be returned.
>> + */
>> +static int is_mm_range_mapped_with_attr(paddr_t pa, paddr_t len,
>> +                                        unsigned int attr)
>> +{
>> +    uint8_t idx;
>> +
>> +    idx = is_mm_range_mapped(pa, len);
>> +    if ( idx != INVALID_REGION_IDX )
>> +    {
>> +        pr_t *region;
>> +
>> +        region = &xen_mpumap[idx];
>> +        if ( !is_mm_attr_match(region, attr) )
>> +            return -EINVAL;
>> +
>> +        return 0;
>> +    }
>> +
>> +    return -ENOENT;
>> +}
>> +
>> +/*
>> + * map_mm_range shall work with unmap_mm_range to map a chunk
>> + * of memory with a transient MPU memory region for a short period of
>> time.
>> + */
>> +static void *map_mm_range(paddr_t pa, size_t len, unsigned int 
>> attributes)
>> +{
>> +    if ( xen_mpumap_update(pa, pa + len, attributes | _PAGE_TRANSIENT) )
>> +        printk(XENLOG_ERR "Failed to map_mm_range 
>> 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
>> +               pa, pa + len);
> Don't you want to return NULL or something from here ?

Right, I shall return NULL. Will fix.
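
Something along these lines (a sketch of the intended fix, not the
final code):

    static void *map_mm_range(paddr_t pa, size_t len, unsigned int attributes)
    {
        if ( xen_mpumap_update(pa, pa + len, attributes | _PAGE_TRANSIENT) )
        {
            printk(XENLOG_ERR "Failed to map_mm_range 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
                   pa, pa + len);
            return NULL;   /* propagate the mapping failure to the caller */
        }

        return maddr_to_virt(pa);
    }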

>> +
>> +    return maddr_to_virt(pa);
>> +}
>> +
>> +static void unmap_mm_range(paddr_t pa)
>> +{
>> +    uint8_t idx;
>> +
>> +    /*
>> +     * The mapping size in map_mm_range is at least PAGE_SIZE.
>> +     * Find the MPU memory region mapped through map_mm_range, and 
>> the associated
>> +     * idx will be returned.
>> +     */
>> +    idx = is_mm_range_mapped(pa, PAGE_SIZE);
>> +    if ( idx == INVALID_REGION_IDX )
>> +    {
>> +        printk(XENLOG_ERR "Failed to unmap_mm_range MPU memory region 
>> at 0x%"PRIpaddr"\n",
>> +               pa);
>> +        return;
>> +    }
>> +
>> +    if ( !region_is_transient(&xen_mpumap[idx]) )
>> +    {
>> +        printk(XENLOG_WARNING "Failed to unmap MPU memory region at 
>> 0x%"PRIpaddr"\n, as it is not transient\n",
>> +               pa);
>> +        return;
>> +    }
> 
> Does this mean you only allow unmapping of transient memory ?

For this function, yes.

> 
> So, is the non transient memory always expected to be mapped throughout 
> the lifetime of the system ?
> 

Maybe I should rename this pair of helpers to
map_mm_transient()/unmap_mm_transient() for better understanding.
For mapping memory with a transient MPU region, we suggest using
map_mm_transient()/unmap_mm_transient().

You can see that, in Xen, functions like ioremap_xxx()/iounmap() and
map_domain_page()/unmap_domain_page() always work as a pair and show
up near each other. "Transient" refers to this kind of scenario.
So "non-transient" memory refers to memory that is not mapped through
functions like ioremap_xxx() and map_domain_page(). We could use
xen_mpumap_update() to do the mapping/unmapping for them.
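
A rough sketch of the two styles (hypothetical call sites with
illustrative pa/len, only for illustration):

    /* Transient: short-lived pair, region marked with _PAGE_TRANSIENT. */
    void *p = ioremap_cache(pa, len);
    /* ... short-time access ... */
    iounmap(p);

    /* Non-transient: stays mapped until explicitly torn down. */
    xen_mpumap_update(pa, pa + len, PAGE_HYPERVISOR);
    /* ... much later ... */
    xen_mpumap_update(pa, pa + len, 0);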

>> +
>> +    /* Disable MPU memory region and clear the corresponding entry in
>> xen_mpumap */
>> +    control_mpu_region_from_index(idx, false);
>> +}
>> +
>> +/*
>> + * It works with "iounmap" as a pair to temporarily map a chunk of 
>> memory
>> + * with a transient MPU memory region, for short-time special access.
>> + */
>> +void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
>> +{
>> +    return map_mm_range(round_pgdown(pa), round_pgup(len), attributes);
>> +}
>> +
>> +/* ioremap_nocache is normally used to map device memory */
>> +void __iomem *ioremap_nocache(paddr_t start, size_t len)
>> +{
>> +    int rc;
> 
> For this function and others (ioremap_xxx()), don't we need to check if 
> the memory is transient ?
> 

Checking whether the memory is transient should lie in the unmapping
function, i.e. iounmap(), since iounmap() shall only unmap "transient"
memory, memory that was mapped through ioremap_xxx().

The reason why I add the is_mm_range_mapped_with_attr() check here is
that, in some corner cases, the memory is already mapped as expected,
so there is no need to map it again. In the MMU case, doing so is only
a redundant mapping. In the MPU case, however, it could fail with an
"Overlapping MPU memory region" error.

For example, if earlyprintk is enabled, uart0 will be mapped in
assembly by setup_early_uart(), see commit
"[PATCH v3 26/52] xen/mpu: map early uart when earlyprintk on".
Later, when we do uart initialization in pl011_uart_init(),
ioremap_nocache() would try to map it a second time if there were no
check.
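
So with the check in place, the sequence degrades gracefully (a
sketch, assuming the early mapping uses the same attributes;
uart0_paddr is illustrative):

    setup_early_uart();               /* maps uart0 non-cacheable in assembly */

    /* ... later, in pl011_uart_init(): */
    p = ioremap_nocache(uart0_paddr, PAGE_SIZE);
    /* finds the existing region via is_mm_range_mapped_with_attr() and
     * returns maddr_to_virt(uart0_paddr) instead of mapping it twice */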

> - Ayan
> 
>> +
>> +    /* Check whether it is already mapped as device memory */
>> +    rc = is_mm_range_mapped_with_attr(start, len, 
>> PAGE_HYPERVISOR_NOCACHE);
>> +    if ( rc == -ENOENT )
>> +        return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
>> +    else if ( rc != 0 )
>> +        return NULL;
>> +
>> +    /* Already mapped */
>> +    return maddr_to_virt(start);
>> +}
>> +
>> +/*
>> + * ioremap_cache, which works with iounmap as a pair, is normally
>> used to
>> + * map a chunk of cacheable memory temporarily for a short-time
>> special purpose.
>> + */
>> +void __iomem *ioremap_cache(paddr_t start, size_t len)
>> +{
>> +    int rc;
>> +
>> +    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR);
>> +    if ( rc == -ENOENT )
>> +        return ioremap_attr(start, len, PAGE_HYPERVISOR);
>> +    else if ( rc != 0 )
>> +        return NULL;
>> +
>> +    /* Already mapped */
>> +    return maddr_to_virt(start);
>> +}
>> +
>> +/*
>> + * ioremap_wc, which works with iounmap as a pair, is normally
>> used to
>> + * map a chunk of non-cacheable memory temporarily for a short-time
>> special
>> + * purpose.
>> + */
>> +void __iomem *ioremap_wc(paddr_t start, size_t len)
>> +{
>> +    int rc;
>> +
>> +    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR_WC);
>> +    if ( rc == -ENOENT )
>> +        return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
>> +    else if ( rc != 0 )
>> +        return NULL;
>> +
>> +    /* Already mapped */
>> +    return maddr_to_virt(start);
>> +}
>> +
>> +void iounmap(void __iomem *va)
>> +{
>> +    unmap_mm_range(virt_to_maddr(va));
>> +}
>> +
>>   /*
>>    * Local variables:
>>    * mode: C
>> -- 
>> 2.25.1
>>
>>

Patch

diff --git a/xen/arch/arm/include/asm/arm64/mpu.h b/xen/arch/arm/include/asm/arm64/mpu.h
index aee7947223..c5e69f239a 100644
--- a/xen/arch/arm/include/asm/arm64/mpu.h
+++ b/xen/arch/arm/include/asm/arm64/mpu.h
@@ -121,6 +121,10 @@  static inline bool region_is_valid(pr_t *pr)
     return pr->prlar.reg.en;
 }
 
+static inline bool region_is_transient(pr_t *pr)
+{
+    return pr->prlar.reg.tran;
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ARM64_MPU_H__ */
diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index cffbf8a595..0352182d99 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -227,6 +227,7 @@  void __iomem *ioremap_attr(paddr_t start, size_t len, unsigned int attributes);
 extern int map_staticmem_pages_to_xen(paddr_t start, paddr_t end);
 extern int unmap_staticmem_pages_to_xen(paddr_t start, paddr_t end);
 
+#ifndef CONFIG_HAS_MPU
 static inline void __iomem *ioremap_nocache(paddr_t start, size_t len)
 {
     return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
@@ -241,6 +242,11 @@  static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
 {
     return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
 }
+#else
+extern void __iomem *ioremap_nocache(paddr_t start, size_t len);
+extern void __iomem *ioremap_cache(paddr_t start, size_t len);
+extern void __iomem *ioremap_wc(paddr_t start, size_t len);
+#endif
 
 /* XXX -- account for base */
 #define mfn_valid(mfn)        ({                                              \
diff --git a/xen/arch/arm/mpu/mm.c b/xen/arch/arm/mpu/mm.c
index 9d5c1da39c..3bb1a5c7c4 100644
--- a/xen/arch/arm/mpu/mm.c
+++ b/xen/arch/arm/mpu/mm.c
@@ -624,6 +624,191 @@  int __init unmap_staticmem_pages_to_xen(paddr_t start, paddr_t end)
     return xen_mpumap_update(start, end, 0);
 }
 
+/*
+ * Check whether memory range [pa, pa + len) is mapped in Xen MPU
+ * memory mapping table xen_mpumap.
+ *
+ * If it is mapped, the associated index will be returned.
+ * If it is not mapped, INVALID_REGION_IDX will be returned.
+ */
+static uint8_t is_mm_range_mapped(paddr_t pa, paddr_t len)
+{
+    int rc;
+    uint8_t idx;
+
+    rc = mpumap_contain_region(xen_mpumap, max_xen_mpumap, pa, pa + len - 1,
+                               &idx);
+    if ( (rc == MPUMAP_REGION_FOUND) || (rc == MPUMAP_REGION_INCLUSIVE) )
+        return idx;
+
+    if ( rc == MPUMAP_REGION_OVERLAP )
+         panic("mpu: can not deal with overlapped MPU memory region\n");
+    /* Not mapped */
+    return INVALID_REGION_IDX;
+}
+
+static bool is_mm_attr_match(pr_t *region, unsigned int attributes)
+{
+    if ( region->prbar.reg.ap != PAGE_AP_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING "region permission is not matched (0x%x -> 0x%x)\n",
+               region->prbar.reg.ap, PAGE_AP_MASK(attributes));
+        return false;
+    }
+
+    if ( region->prbar.reg.xn != PAGE_XN_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING "region execution permission is not matched (0x%x -> 0x%x)\n",
+               region->prbar.reg.xn, PAGE_XN_MASK(attributes));
+        return false;
+    }
+
+    if ( region->prlar.reg.ai != PAGE_AI_MASK(attributes) )
+    {
+        printk(XENLOG_WARNING "region memory attributes is not matched (0x%x -> 0x%x)\n",
+               region->prlar.reg.ai, PAGE_AI_MASK(attributes));
+        return false;
+    }
+
+    return true;
+}
+
+/*
+ * Check whether memory range [pa, pa + len) is mapped with memory
+ * attributes #attr in Xen MPU memory mapping table xen_mpumap.
+ *
+ * If it is mapped but with different memory attributes, Errno -EINVAL
+ * will be returned.
+ * If it is not mapped at all, Errno -ENOENT will be returned.
+ */
+static int is_mm_range_mapped_with_attr(paddr_t pa, paddr_t len,
+                                        unsigned int attr)
+{
+    uint8_t idx;
+
+    idx = is_mm_range_mapped(pa, len);
+    if ( idx != INVALID_REGION_IDX )
+    {
+        pr_t *region;
+
+        region = &xen_mpumap[idx];
+        if ( !is_mm_attr_match(region, attr) )
+            return -EINVAL;
+
+        return 0;
+    }
+
+    return -ENOENT;
+}
+
+/*
+ * map_mm_range shall work with unmap_mm_range to map a chunk
+ * of memory with a transient MPU memory region for a short period of time.
+ */
+static void *map_mm_range(paddr_t pa, size_t len, unsigned int attributes)
+{
+    if ( xen_mpumap_update(pa, pa + len, attributes | _PAGE_TRANSIENT) )
+        printk(XENLOG_ERR "Failed to map_mm_range 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
+               pa, pa + len);
+
+    return maddr_to_virt(pa);
+}
+
+static void unmap_mm_range(paddr_t pa)
+{
+    uint8_t idx;
+
+    /*
+     * The mapping size in map_mm_range is at least PAGE_SIZE.
+     * Find the MPU memory region mapped through map_mm_range, and the associated
+     * idx will be returned.
+     */
+    idx = is_mm_range_mapped(pa, PAGE_SIZE);
+    if ( idx == INVALID_REGION_IDX )
+    {
+        printk(XENLOG_ERR "Failed to unmap_mm_range MPU memory region at 0x%"PRIpaddr"\n",
+               pa);
+        return;
+    }
+
+    if ( !region_is_transient(&xen_mpumap[idx]) )
+    {
+        printk(XENLOG_WARNING "Failed to unmap MPU memory region at 0x%"PRIpaddr"\n, as it is not transient\n",
+               pa);
+        return;
+    }
+
+    /* Disable MPU memory region and clear the corresponding entry in xen_mpumap */
+    control_mpu_region_from_index(idx, false);
+}
+
+/*
+ * It works with "iounmap" as a pair to temporarily map a chunk of memory
+ * with a transient MPU memory region, for short-time special access.
+ */
+void *ioremap_attr(paddr_t pa, size_t len, unsigned int attributes)
+{
+    return map_mm_range(round_pgdown(pa), round_pgup(len), attributes);
+}
+
+/* ioremap_nocache is normally used to map device memory */
+void __iomem *ioremap_nocache(paddr_t start, size_t len)
+{
+    int rc;
+
+    /* Check whether it is already mapped as device memory */
+    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
+    if ( rc == -ENOENT )
+        return ioremap_attr(start, len, PAGE_HYPERVISOR_NOCACHE);
+    else if ( rc != 0 )
+        return NULL;
+
+    /* Already mapped */
+    return maddr_to_virt(start);
+}
+
+/*
+ * ioremap_cache, which works with iounmap as a pair, is normally used to
+ * map a chunk of cacheable memory temporarily for a short-time special purpose.
+ */
+void __iomem *ioremap_cache(paddr_t start, size_t len)
+{
+    int rc;
+
+    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR);
+    if ( rc == -ENOENT )
+        return ioremap_attr(start, len, PAGE_HYPERVISOR);
+    else if ( rc != 0 )
+        return NULL;
+
+    /* Already mapped */
+    return maddr_to_virt(start);
+}
+
+/*
+ * ioremap_wc, which works with iounmap as a pair, is normally used to
+ * map a chunk of non-cacheable memory temporarily for a short-time special
+ * purpose.
+ */
+void __iomem *ioremap_wc(paddr_t start, size_t len)
+{
+    int rc;
+
+    rc = is_mm_range_mapped_with_attr(start, len, PAGE_HYPERVISOR_WC);
+    if ( rc == -ENOENT )
+        return ioremap_attr(start, len, PAGE_HYPERVISOR_WC);
+    else if ( rc != 0 )
+        return NULL;
+
+    /* Already mapped */
+    return maddr_to_virt(start);
+}
+
+void iounmap(void __iomem *va)
+{
+    unmap_mm_range(virt_to_maddr(va));
+}
+
 /*
  * Local variables:
  * mode: C