@@ -420,7 +420,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
}
#endif
-void flush_page_to_ram(unsigned long mfn)
+void flush_page_to_ram(unsigned long mfn, bool sync_icache)
{
void *v = map_domain_page(_mfn(mfn));
@@ -435,7 +435,8 @@ void flush_page_to_ram(unsigned long mfn)
* I-Cache (See D4.9.2 in ARM DDI 0487A.k_iss10775). Instead of using flush
* by VA on select platforms, we just flush the entire cache here.
*/
- invalidate_icache();
+ if ( sync_icache )
+ invalidate_icache();
}
void __init arch_init_memory(void)
@@ -1392,7 +1392,7 @@ int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr)
/* XXX: Implement preemption */
while ( gfn_x(start) < gfn_x(next_gfn) )
{
- flush_page_to_ram(mfn_x(mfn));
+ flush_page_to_ram(mfn_x(mfn), true);
start = gfn_add(start, 1);
mfn = mfn_add(mfn, 1);
@@ -833,7 +833,7 @@ static struct page_info *alloc_heap_pages(
/* Ensure cache and RAM are consistent for platforms where the
* guest can control its own visibility of/through the cache.
*/
- flush_page_to_ram(page_to_mfn(&pg[i]));
+ flush_page_to_ram(page_to_mfn(&pg[i]), true);
}
spin_unlock(&heap_lock);
@@ -407,7 +407,7 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va,
}
/* Flush the dcache for an entire page. */
-void flush_page_to_ram(unsigned long mfn);
+void flush_page_to_ram(unsigned long mfn, bool sync_icache);
/*
* Print a walk of a page table or p2m
@@ -118,7 +118,7 @@ void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_tlb_one_all(v) \
flush_tlb_one_mask(&cpu_online_map, v)
-static inline void flush_page_to_ram(unsigned long mfn) {}
+static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
static inline int invalidate_dcache_va_range(const void *p,
unsigned long size)
{ return -EOPNOTSUPP; }
flush_page_to_ram() unconditionally drops the icache. In certain situations this leads to excessive icache flushes when flush_page_to_ram() ends up being repeatedly called in a loop. Introduce a parameter to allow callers of flush_page_to_ram() to take responsibility for synchronising the icache. This is in preparation for adding logic to make the callers perform the necessary icache maintenance operations. Signed-off-by: Punit Agrawal <punit.agrawal@arm.com> --- xen/arch/arm/mm.c | 5 +++-- xen/arch/arm/p2m.c | 2 +- xen/common/page_alloc.c | 2 +- xen/include/asm-arm/page.h | 2 +- xen/include/asm-x86/flushtlb.h | 2 +- 5 files changed, 7 insertions(+), 6 deletions(-)