[MM-PART1,v3,5/8] xen/arm: page: Clarify the Xen TLBs helpers name

Message ID 20190514121132.26732-6-julien.grall@arm.com (mailing list archive)
State New, archived
Series xen/arm: TLB flush helpers rework

Commit Message

Julien Grall May 14, 2019, 12:11 p.m. UTC
Now that flush_xen_text_tlb_local() has been dropped, we have only one
set of helpers acting on Xen TLBs. Their naming is quite confusing
because, despite the "data" in the names, the TLB instructions used act
on both Data and Instruction TLBs.

Take the opportunity to rework the documentation, which can be confusing
to read as it does not match the implementation. Note that the mention of
instruction cache maintenance has been removed because modifying a
mapping does not require instruction cache maintenance.

Lastly, switch from unsigned long to vaddr_t, as the functions
technically deal with virtual addresses.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>

---
    Changes in v3:
        - Update commit message
        - Fix typos

    Changes in v2:
        - Add Andrii's reviewed-by
---
 xen/arch/arm/mm.c                | 18 +++++++++---------
 xen/include/asm-arm/arm32/page.h | 15 +++++----------
 xen/include/asm-arm/arm64/page.h | 15 +++++----------
 xen/include/asm-arm/page.h       | 28 ++++++++++++++--------------
 4 files changed, 33 insertions(+), 43 deletions(-)
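
For context, a minimal sketch of the calling pattern after this rename
(write_pte(), flush_xen_tlb_range_va(), lpae_t and vaddr_t are taken from
the patch below; the update_xen_mapping() wrapper itself is illustrative
and not part of the change):

/*
 * Illustrative sketch only: update a hypervisor page-table entry, then
 * invalidate the stale translation on all processors in the
 * inner-shareable domain. This wrapper is hypothetical, not in the patch.
 */
static void update_xen_mapping(lpae_t *entry, lpae_t pte, vaddr_t va)
{
    write_pte(entry, pte);
    /*
     * The renamed helper makes clear the flush is not limited to the
     * data TLB: the underlying TLB instructions act on both data and
     * instruction TLBs.
     */
    flush_xen_tlb_range_va(va, PAGE_SIZE);
}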

Comments

Julien Grall May 29, 2019, 4:44 p.m. UTC | #1
Gentle ping

On 14/05/2019 13:11, Julien Grall wrote:
> Now that flush_xen_text_tlb_local() has been dropped, we have only one
> set of helpers acting on Xen TLBs. Their naming is quite confusing
> because, despite the "data" in the names, the TLB instructions used act
> on both Data and Instruction TLBs.
> [...]
Julien Grall June 10, 2019, 10:06 a.m. UTC | #2
(+ Committers)

Ping again... I have quite a few patches blocked on this work.

There was a discussion on v2 (see [1]) which pointed out that the
suggested comment was out-of-context. I am still waiting on input from
Stefano to clarify the relation...

Cheers,

[1] https://lists.xenproject.org/archives/html/xen-devel/2019-05/msg01708.html

On 29/05/2019 17:44, Julien Grall wrote:
> Gentle ping
> 
> On 14/05/2019 13:11, Julien Grall wrote:
>> Now that flush_xen_text_tlb_local() has been dropped, we have only one
>> set of helpers acting on Xen TLBs.
>> [...]

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index dfbe39c70a..8ee828d445 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -335,7 +335,7 @@  void set_fixmap(unsigned map, mfn_t mfn, unsigned int flags)
     pte.pt.table = 1; /* 4k mappings always have this bit set */
     pte.pt.xn = 1;
     write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
-    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+    flush_xen_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
 /* Remove a mapping from a fixmap entry */
@@ -343,7 +343,7 @@  void clear_fixmap(unsigned map)
 {
     lpae_t pte = {0};
     write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
-    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
+    flush_xen_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
 }
 
 /* Create Xen's mappings of memory.
@@ -377,7 +377,7 @@  static void __init create_mappings(lpae_t *second,
         write_pte(p + i, pte);
         pte.pt.base += 1 << LPAE_SHIFT;
     }
-    flush_xen_data_tlb_local();
+    flush_xen_tlb_local();
 }
 
 #ifdef CONFIG_DOMAIN_PAGE
@@ -455,7 +455,7 @@  void *map_domain_page(mfn_t mfn)
      * We may not have flushed this specific subpage at map time,
      * since we only flush the 4k page not the superpage
      */
-    flush_xen_data_tlb_range_va_local(va, PAGE_SIZE);
+    flush_xen_tlb_range_va_local(va, PAGE_SIZE);
 
     return (void *)va;
 }
@@ -598,7 +598,7 @@  void __init remove_early_mappings(void)
     write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START), pte);
     write_pte(xen_second + second_table_offset(BOOT_FDT_VIRT_START + SZ_2M),
               pte);
-    flush_xen_data_tlb_range_va(BOOT_FDT_VIRT_START, BOOT_FDT_SLOT_SIZE);
+    flush_xen_tlb_range_va(BOOT_FDT_VIRT_START, BOOT_FDT_SLOT_SIZE);
 }
 
 /*
@@ -615,7 +615,7 @@  static void xen_pt_enforce_wnx(void)
      * before flushing the TLBs.
      */
     isb();
-    flush_xen_data_tlb_local();
+    flush_xen_tlb_local();
 }
 
 extern void switch_ttbr(uint64_t ttbr);
@@ -879,7 +879,7 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
         vaddr += FIRST_SIZE;
     }
 
-    flush_xen_data_tlb_local();
+    flush_xen_tlb_local();
 }
 #endif
 
@@ -1052,7 +1052,7 @@  static int create_xen_entries(enum xenmap_operation op,
                 BUG();
         }
     }
-    flush_xen_data_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
+    flush_xen_tlb_range_va(virt, PAGE_SIZE * nr_mfns);
 
     rc = 0;
 
@@ -1127,7 +1127,7 @@  static void set_pte_flags_on_range(const char *p, unsigned long l, enum mg mg)
         }
         write_pte(xen_xenmap + i, pte);
     }
-    flush_xen_data_tlb_local();
+    flush_xen_tlb_local();
 }
 
 /* Release all __init and __initdata ranges to be reused */
diff --git a/xen/include/asm-arm/arm32/page.h b/xen/include/asm-arm/arm32/page.h
index 40a77daa9d..0b41b9214b 100644
--- a/xen/include/asm-arm/arm32/page.h
+++ b/xen/include/asm-arm/arm32/page.h
@@ -61,12 +61,8 @@  static inline void invalidate_icache_local(void)
     isb();                      /* Synchronize fetched instruction stream. */
 }
 
-/*
- * Flush all hypervisor mappings from the data TLB of the local
- * processor. This is not sufficient when changing code mappings or
- * for self modifying code.
- */
-static inline void flush_xen_data_tlb_local(void)
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
 {
     asm volatile("dsb;" /* Ensure preceding are visible */
                  CMD_CP32(TLBIALLH)
@@ -76,14 +72,13 @@  static inline void flush_xen_data_tlb_local(void)
 }
 
 /* Flush TLB of local processor for address va. */
-static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
+static inline void __flush_xen_tlb_one_local(vaddr_t va)
 {
     asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
 }
 
-/* Flush TLB of all processors in the inner-shareable domain for
- * address va. */
-static inline void __flush_xen_data_tlb_one(vaddr_t va)
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
 {
     asm volatile(STORE_CP32(0, TLBIMVAHIS) : : "r" (va) : "memory");
 }
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 6c36d0210f..31d04ecf76 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -45,12 +45,8 @@  static inline void invalidate_icache_local(void)
     isb();
 }
 
-/*
- * Flush all hypervisor mappings from the data TLB of the local
- * processor. This is not sufficient when changing code mappings or
- * for self modifying code.
- */
-static inline void flush_xen_data_tlb_local(void)
+/* Flush all hypervisor mappings from the TLB of the local processor. */
+static inline void flush_xen_tlb_local(void)
 {
     asm volatile (
         "dsb    sy;"                    /* Ensure visibility of PTE writes */
@@ -61,14 +57,13 @@  static inline void flush_xen_data_tlb_local(void)
 }
 
 /* Flush TLB of local processor for address va. */
-static inline void  __flush_xen_data_tlb_one_local(vaddr_t va)
+static inline void  __flush_xen_tlb_one_local(vaddr_t va)
 {
     asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
 }
 
-/* Flush TLB of all processors in the inner-shareable domain for
- * address va. */
-static inline void __flush_xen_data_tlb_one(vaddr_t va)
+/* Flush TLB of all processors in the inner-shareable domain for address va. */
+static inline void __flush_xen_tlb_one(vaddr_t va)
 {
     asm volatile("tlbi vae2is, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
 }
diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
index 1a1713ce02..195345e24a 100644
--- a/xen/include/asm-arm/page.h
+++ b/xen/include/asm-arm/page.h
@@ -234,18 +234,18 @@  static inline int clean_and_invalidate_dcache_va_range
 } while (0)
 
 /*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the TLB of the local
+ * processor.
  */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
-                                                     unsigned long size)
+static inline void flush_xen_tlb_range_va_local(vaddr_t va,
+                                                unsigned long size)
 {
-    unsigned long end = va + size;
+    vaddr_t end = va + size;
+
     dsb(sy); /* Ensure preceding are visible */
     while ( va < end )
     {
-        __flush_xen_data_tlb_one_local(va);
+        __flush_xen_tlb_one_local(va);
         va += PAGE_SIZE;
     }
     dsb(sy); /* Ensure completion of the TLB flush */
@@ -253,18 +253,18 @@  static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
 }
 
 /*
- * Flush a range of VA's hypervisor mappings from the data TLB of all
- * processors in the inner-shareable domain. This is not sufficient
- * when changing code mappings or for self modifying code.
+ * Flush a range of VA's hypervisor mappings from the TLB of all
+ * processors in the inner-shareable domain.
  */
-static inline void flush_xen_data_tlb_range_va(unsigned long va,
-                                               unsigned long size)
+static inline void flush_xen_tlb_range_va(vaddr_t va,
+                                          unsigned long size)
 {
-    unsigned long end = va + size;
+    vaddr_t end = va + size;
+
     dsb(sy); /* Ensure preceding are visible */
     while ( va < end )
     {
-        __flush_xen_data_tlb_one(va);
+        __flush_xen_tlb_one(va);
         va += PAGE_SIZE;
     }
     dsb(sy); /* Ensure completion of the TLB flush */