@@ -151,7 +151,7 @@ void p2m_restore_state(struct vcpu *n)
* when running multiple vCPU of the same domain on a single pCPU.
*/
if ( *last_vcpu_ran != INVALID_VCPU_ID && *last_vcpu_ran != n->vcpu_id )
- flush_tlb_local();
+ flush_guest_tlb_local();
*last_vcpu_ran = n->vcpu_id;
}
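
Stage-1 and stage-2 TLB entries are tagged with the domain's VMID rather than with a vCPU id, so two vCPUs of the same domain sharing a pCPU can see each other's stale stage-1 translations; that is what the guarded flush_guest_tlb_local() call above protects against. A minimal standalone sketch of that bookkeeping follows, with illustrative names and values (NR_CPUS, needs_guest_tlb_flush, the INVALID_VCPU_ID placeholder) rather than Xen code:

#include <stdbool.h>
#include <stdint.h>

#define NR_CPUS          8       /* illustrative size */
#define INVALID_VCPU_ID  0xffU   /* placeholder; Xen defines its own value */

/* One slot per pCPU (kept per domain in Xen proper), as *last_vcpu_ran above. */
static uint8_t last_vcpu_ran[NR_CPUS];

/*
 * A flush is needed only when a *different* vCPU of the same domain, and
 * hence the same VMID, was the last to run on this pCPU: its stage-1
 * entries are still tagged with the VMID about to be reused.
 */
static bool needs_guest_tlb_flush(unsigned int cpu, uint8_t next_vcpu)
{
    uint8_t prev = last_vcpu_ran[cpu];

    return prev != INVALID_VCPU_ID && prev != next_vcpu;
}
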
@@ -196,7 +196,7 @@ static void p2m_force_tlb_flush_sync(struct p2m_domain *p2m)
isb();
}
- flush_tlb();
+ flush_guest_tlb();
if ( ovttbr != READ_SYSREG64(VTTBR_EL2) )
{
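
The flush above sits inside a VTTBR_EL2 switch: the guest TLB flush only acts on the currently loaded VMID, so the target p2m's VTTBR has to be live while the TLBI runs, and the original value is restored afterwards. Below is a sketch of that switch-flush-restore pattern, reconstructed from the context lines rather than copied from p2m_force_tlb_flush_sync(), assuming Xen's READ_SYSREG64/WRITE_SYSREG64, isb() and local_irq_save() helpers and using a hypothetical wrapper name:

/* Hypothetical wrapper illustrating the switch-flush-restore pattern. */
static void flush_one_p2m_sync(struct p2m_domain *p2m)
{
    unsigned long flags = 0;
    uint64_t ovttbr = READ_SYSREG64(VTTBR_EL2);

    if ( ovttbr != p2m->vttbr )
    {
        local_irq_save(flags);                 /* keep the switch atomic on this pCPU */
        WRITE_SYSREG64(p2m->vttbr, VTTBR_EL2);
        isb();                                 /* new VTTBR visible before the TLBI */
    }

    flush_guest_tlb();                         /* inner shareable, current VMID only */

    if ( ovttbr != READ_SYSREG64(VTTBR_EL2) )
    {
        WRITE_SYSREG64(ovttbr, VTTBR_EL2);
        isb();                                 /* restore before re-enabling interrupts */
        local_irq_restore(flags);
    }
}
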
@@ -1969,7 +1969,7 @@ static void setup_virt_paging_one(void *data)
WRITE_SYSREG(READ_SYSREG(HCR_EL2) | HCR_VM, HCR_EL2);
isb();
- flush_tlb_all_local();
+ flush_all_guests_tlb_local();
}
}
@@ -8,7 +8,7 @@
void flush_tlb_mask(const cpumask_t *mask)
{
/* No need to IPI other processors on ARM, the processor takes care of it. */
- flush_tlb_all();
+ flush_all_guests_tlb();
}
void smp_send_event_check_mask(const cpumask_t *mask)
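
On Arm the cpumask can be disregarded because broadcast TLB maintenance does the cross-CPU work: the inner-shareable TLBI issued by flush_all_guests_tlb() reaches every PE in the inner-shareable domain, so no IPIs are needed. An illustrative caller (hypothetical, not part of this patch) for clarity:

/* A caller may pass any subset of CPUs; the broadcast flush covers them all. */
static void example_flush_on_cpus(const cpumask_t *cpus)
{
    flush_tlb_mask(cpus);   /* single broadcast flush, no IPIs sent */
}
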
@@ -1924,7 +1924,7 @@ static void do_trap_stage2_abort_guest(struct cpu_user_regs *regs,
* still be inaccurate.
*/
if ( !is_data )
- flush_tlb_local();
+ flush_guest_tlb_local();
rc = gva_to_ipa(gva, &gpa, GV2M_READ);
/*
@@ -2,7 +2,7 @@
#define __ASM_ARM_ARM32_FLUSHTLB_H__
/* Flush local TLBs, current VMID only */
-static inline void flush_tlb_local(void)
+static inline void flush_guest_tlb_local(void)
{
dsb(sy);
@@ -13,7 +13,7 @@ static inline void flush_tlb_local(void)
}
/* Flush inner shareable TLBs, current VMID only */
-static inline void flush_tlb(void)
+static inline void flush_guest_tlb(void)
{
dsb(sy);
@@ -24,7 +24,7 @@ static inline void flush_tlb(void)
}
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_tlb_all_local(void)
+static inline void flush_all_guests_tlb_local(void)
{
dsb(sy);
@@ -35,7 +35,7 @@ static inline void flush_tlb_all_local(void)
}
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_tlb_all(void)
+static inline void flush_all_guests_tlb(void)
{
dsb(sy);
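
Judging by the include guard, the four hunks above touch the arm32 flushtlb.h, and each hunk truncates the helper body after the first barrier. Here is a sketch of the renamed helpers with the bodies filled back in; the CP15 operations named (TLBIALL, TLBIALLIS, TLBIALLNSNH, TLBIALLNSNHIS) and the WRITE_CP32()/dsb()/isb() accessors are my reading of the pre-existing Xen code, not something this diff shows:

static inline void flush_guest_tlb_local(void)
{
    dsb(sy);
    WRITE_CP32((uint32_t) 0, TLBIALL);       /* current VMID, this PE only */
    dsb(sy);
    isb();
}

static inline void flush_guest_tlb(void)
{
    dsb(sy);
    WRITE_CP32((uint32_t) 0, TLBIALLIS);     /* current VMID, inner shareable */
    dsb(sy);
    isb();
}

static inline void flush_all_guests_tlb_local(void)
{
    dsb(sy);
    WRITE_CP32((uint32_t) 0, TLBIALLNSNH);   /* all VMIDs, non-hypervisor, this PE */
    dsb(sy);
    isb();
}

static inline void flush_all_guests_tlb(void)
{
    dsb(sy);
    WRITE_CP32((uint32_t) 0, TLBIALLNSNHIS); /* all VMIDs, non-hypervisor, inner shareable */
    dsb(sy);
    isb();
}
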
@@ -2,7 +2,7 @@
#define __ASM_ARM_ARM64_FLUSHTLB_H__
/* Flush local TLBs, current VMID only */
-static inline void flush_tlb_local(void)
+static inline void flush_guest_tlb_local(void)
{
asm volatile(
"dsb sy;"
@@ -13,7 +13,7 @@ static inline void flush_tlb_local(void)
}
/* Flush innershareable TLBs, current VMID only */
-static inline void flush_tlb(void)
+static inline void flush_guest_tlb(void)
{
asm volatile(
"dsb sy;"
@@ -24,7 +24,7 @@ static inline void flush_tlb(void)
}
/* Flush local TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_tlb_all_local(void)
+static inline void flush_all_guests_tlb_local(void)
{
asm volatile(
"dsb sy;"
@@ -35,7 +35,7 @@ static inline void flush_tlb_all_local(void)
}
/* Flush innershareable TLBs, all VMIDs, non-hypervisor mode */
-static inline void flush_tlb_all(void)
+static inline void flush_all_guests_tlb(void)
{
asm volatile(
"dsb sy;"