The majority of the present callers really aren't after invalidating
cache contents, but only after writeback. Make this available by simply
extending the FLUSH_CACHE handling accordingly.

No feature checks are required here: cache_writeback() falls back to
cache_flush() as necessary, while WBNOINVD degenerates to WBINVD on
older hardware.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: FLUSH_WRITEBACK -> FLUSH_CACHE_WRITEBACK.

@@ -232,7 +232,7 @@ unsigned int flush_area_local(const void
if ( flags & FLUSH_HVM_ASID_CORE )
hvm_flush_guest_tlbs();
- if ( flags & FLUSH_CACHE )
+ if ( flags & (FLUSH_CACHE | FLUSH_CACHE_WRITEBACK) )
{
const struct cpuinfo_x86 *c = &current_cpu_data;
unsigned long sz = 0;
@@ -245,13 +245,16 @@ unsigned int flush_area_local(const void
c->x86_clflush_size && c->x86_cache_size && sz &&
((sz >> 10) < c->x86_cache_size) )
{
- cache_flush(va, sz);
- flags &= ~FLUSH_CACHE;
+ if ( flags & FLUSH_CACHE )
+ cache_flush(va, sz);
+ else
+ cache_writeback(va, sz);
+ flags &= ~(FLUSH_CACHE | FLUSH_CACHE_WRITEBACK);
}
- else
- {
+ else if ( flags & FLUSH_CACHE )
wbinvd();
- }
+ else
+ wbnoinvd();
}
if ( flags & FLUSH_ROOT_PGTBL )
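
For reference, the "WBNOINVD degenerates to WBINVD" point made in the
description above is purely an encoding property. A minimal sketch of a
suitable helper (the name comes from the patch; the body is illustrative,
not necessarily Xen's actual implementation):

static inline void wbnoinvd(void)
{
    /*
     * WBNOINVD is WBINVD (0f 09) with an F3 prefix.  Hardware lacking
     * the feature ignores the prefix and executes plain WBINVD, so the
     * fallback happens in silicon and no feature check is needed here.
     */
    asm volatile ( ".byte 0xf3, 0x0f, 0x09" ::: "memory" );
}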
@@ -135,6 +135,8 @@ void switch_cr3_cr4(unsigned long cr3, u
#else
# define FLUSH_NO_ASSIST 0
#endif
+ /* Write back data cache contents. */
+#define FLUSH_CACHE_WRITEBACK 0x10000
/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);
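
Similarly, the "no feature checks" argument assumes cache_writeback()
itself falls back to cache_flush() where CLWB is unavailable. A minimal
sketch under that assumption (only the helper names and current_cpu_data
come from context; the feature test and loop shape are guesses):

static void cache_writeback(const void *addr, unsigned int size)
{
    unsigned long p;
    unsigned int clflush_size = current_cpu_data.x86_clflush_size;

    /* Assumption: CLWB support is signalled via this feature flag. */
    if ( !boot_cpu_has(X86_FEATURE_CLWB) )
    {
        /* Write-back-only isn't available: flush (and invalidate). */
        cache_flush(addr, size);
        return;
    }

    for ( p = (unsigned long)addr & ~(clflush_size - 1UL);
          p < (unsigned long)addr + size; p += clflush_size )
        /* CLWB writes the line back without evicting it. */
        asm volatile ( "clwb %0" :: "m" (*(const char *)p) );

    /* Make the write-backs globally visible before returning. */
    asm volatile ( "sfence" ::: "memory" );
}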
@@ -194,7 +196,11 @@ static inline int clean_and_invalidate_d
}
static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
- return clean_and_invalidate_dcache_va_range(p, size);
+ unsigned int order = get_order_from_bytes(size);
+
+ /* sub-page granularity support needs to be added if necessary */
+ flush_area_local(p, FLUSH_CACHE_WRITEBACK | FLUSH_ORDER(order));
+ return 0;
}
unsigned int guest_flush_tlb_flags(const struct domain *d);
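
To illustrate the intended use of the reworked clean_dcache_va_range():
a hypothetical caller that pushes CPU writes out to memory for a
non-coherent observer without evicting the lines it will keep using
(all names below are made up for illustration):

struct ring_desc {            /* hypothetical descriptor layout */
    uint64_t addr;
    uint32_t len;
};

static void publish_desc(struct ring_desc *d, const void *buf, uint32_t len)
{
    d->addr = __pa(buf);
    d->len  = len;

    /* Write back, but don't invalidate: the CPU keeps reading these. */
    clean_dcache_va_range(buf, len);
    clean_dcache_va_range(d, sizeof(*d));
}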