--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -152,16 +152,26 @@ static void populate_physmap(struct memop_args *a)
max_order(curr_d)) )
return;
- /*
- * With MEMF_no_tlbflush set, alloc_heap_pages() will ignore
- * TLB-flushes. After VM creation, this is a security issue (it can
- * make pages accessible to guest B, when guest A may still have a
- * cached mapping to them). So we do this only during domain creation,
- * when the domain itself has not yet been unpaused for the first
- * time.
- */
if ( unlikely(!d->creation_finished) )
+ {
+ /*
+ * With MEMF_no_tlbflush set, alloc_heap_pages() will ignore
+ * TLB-flushes. After VM creation, this is a security issue (it can
+ * make pages accessible to guest B, when guest A may still have a
+ * cached mapping to them). So we do this only during domain creation,
+ * when the domain itself has not yet been unpaused for the first
+ * time.
+ */
a->memflags |= MEMF_no_tlbflush;
+ /*
+ * With MEMF_no_icache_flush, alloc_heap_pages() will skip
+ * performing icache flushes. We do this only during domain
+ * creation, because once the domain is running there is a
+ * danger of executing instructions from stale caches if the
+ * icache flush is delayed.
+ */
+ a->memflags |= MEMF_no_icache_flush;
+ }
for ( i = a->nr_done; i < a->nr_extents; i++ )
{
@@ -211,7 +221,6 @@ static void populate_physmap(struct memop_args *a)
}
mfn = gpfn;
- page = mfn_to_page(mfn);
}
else
{
@@ -255,6 +264,10 @@ static void populate_physmap(struct memop_args *a)
out:
if ( need_tlbflush )
filtered_flush_tlb_mask(tlbflush_timestamp);
+
+ if ( a->memflags & MEMF_no_icache_flush )
+ invalidate_icache();
+
a->nr_done = i;
}
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -833,7 +833,7 @@ static struct page_info *alloc_heap_pages(
/* Ensure cache and RAM are consistent for platforms where the
* guest can control its own visibility of/through the cache.
*/
- flush_page_to_ram(page_to_mfn(&pg[i]), true);
+ flush_page_to_ram(page_to_mfn(&pg[i]), !(memflags & MEMF_no_icache_flush));
}
spin_unlock(&heap_lock);
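
For reference (not part of this patch), the Arm side is expected to consume
the new boolean roughly as follows. This is a sketch modelled on
xen/arch/arm/mm.c; the helper names (map_domain_page(),
clean_and_invalidate_dcache_va_range()) are recalled from the tree and may
differ in detail:

    /* Sketch: Arm consumer of the sync_icache argument. */
    void flush_page_to_ram(unsigned long mfn, bool sync_icache)
    {
        void *v = map_domain_page(_mfn(mfn));

        /* The data cache is always cleaned and invalidated for the page. */
        clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
        unmap_domain_page(v);

        /*
         * Batched allocations (MEMF_no_icache_flush) pass sync_icache ==
         * false; populate_physmap() then issues a single
         * invalidate_icache() after all extents have been allocated.
         */
        if ( sync_icache )
            invalidate_icache();
    }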
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -375,6 +375,10 @@ perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
+static inline void invalidate_icache(void)
+{
+}
+
#endif /* __X86_PAGE_H__ */
/*
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -224,6 +224,8 @@ struct npfec {
#define MEMF_no_owner (1U<<_MEMF_no_owner)
#define _MEMF_no_tlbflush 6
#define MEMF_no_tlbflush (1U<<_MEMF_no_tlbflush)
+#define _MEMF_no_icache_flush 7
+#define MEMF_no_icache_flush (1U<<_MEMF_no_icache_flush)
#define _MEMF_node 8
#define MEMF_node_mask ((1U << (8 * sizeof(nodeid_t))) - 1)
#define MEMF_node(n) ((((n) + 1) & MEMF_node_mask) << _MEMF_node)
populate_physmap() calls alloc_heap_pages() once per requested extent, and
alloc_heap_pages() invalidates the entire icache each time. During domain
creation the icache invalidations can be deferred until all the extents
have been allocated, as there is no risk of executing stale instructions
from the icache.

Introduce a new flag, MEMF_no_icache_flush, to prevent alloc_heap_pages()
from performing icache maintenance operations. Use the flag in
populate_physmap() while the domain has not yet been unpaused, and perform
the required icache maintenance once at the end of the allocation.

One concern is the lack of synchronisation around testing
"creation_finished". In practice, however, the window in which it can be
out of sync should be small enough not to matter.

Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
---
 xen/common/memory.c        | 31 ++++++++++++++++++++++---------
 xen/common/page_alloc.c    |  2 +-
 xen/include/asm-x86/page.h |  4 ++++
 xen/include/xen/mm.h       |  2 ++
 4 files changed, 29 insertions(+), 10 deletions(-)
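
For completeness: on x86 the new invalidate_icache() stub is a no-op,
since x86 hardware keeps the instruction cache coherent with memory. On
Arm the helper must do real work. A minimal arm64 sketch, assuming Xen's
usual dsb()/isb() barrier macros from asm-arm/system.h (not shown in this
patch, so details may differ):

    /* Sketch: arm64 icache invalidation (cf. asm-arm/arm64/page.h). */
    static inline void invalidate_icache(void)
    {
        asm volatile ("ic ialluis"); /* Invalidate all icaches to PoU, Inner Shareable. */
        dsb(ish);                    /* Wait for the invalidation to complete. */
        isb();                       /* Synchronise the instruction stream. */
    }

With this in place, populate_physmap() pays the cost of a full icache
invalidation once per batch rather than once per extent.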