@@ -52,8 +52,21 @@ static inline void p2m_write_lock(struct p2m_domain *p2m)
write_lock(&p2m->lock);
}
+static void p2m_flush_tlb(struct p2m_domain *p2m);
+
static inline void p2m_write_unlock(struct p2m_domain *p2m)
{
+ if ( p2m->need_flush )
+ {
+ p2m->need_flush = false;
+ /*
+ * The final flush is done with the P2M write lock taken to
+ * avoid someone else modifying the P2M before the TLB
+ * invalidation has completed.
+ */
+ p2m_flush_tlb(p2m);
+ }
+
write_unlock(&p2m->lock);
}
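
With the hunk above, a writer that modifies the P2M no longer needs to flush explicitly in the common case: it marks the flush as pending and p2m_write_unlock() issues it before dropping the lock. A minimal sketch of that pattern (p2m_remove_mapping_example is a hypothetical helper, shown only to illustrate the protocol):

    /* Illustrative only: defer the TLB flush to p2m_write_unlock(). */
    static int p2m_remove_mapping_example(struct domain *d, gfn_t gfn)
    {
        struct p2m_domain *p2m = &d->arch.p2m;

        p2m_write_lock(p2m);

        /* ... clear the P2M entry for gfn here ... */

        /*
         * Stale translations may still be cached; record it and let
         * p2m_write_unlock() perform the flush while the lock is
         * still held.
         */
        p2m->need_flush = true;

        p2m_write_unlock(p2m);

        return 0;
    }
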
@@ -72,6 +85,11 @@ static inline int p2m_is_locked(struct p2m_domain *p2m)
return rw_is_locked(&p2m->lock);
}
+static inline int p2m_is_write_locked(struct p2m_domain *p2m)
+{
+ return rw_is_write_locked(&p2m->lock);
+}
+
void p2m_dump_info(struct domain *d)
{
struct p2m_domain *p2m = &d->arch.p2m;
@@ -165,6 +183,19 @@ static void p2m_flush_tlb(struct p2m_domain *p2m)
}
/*
+ * Force a synchronous P2M TLB flush.
+ *
+ * Must be called with the p2m lock held.
+ */
+static void p2m_flush_tlb_sync(struct p2m_domain *p2m)
+{
+ ASSERT(p2m_is_write_locked(p2m));
+
+ p2m_flush_tlb(p2m);
+ p2m->need_flush = false;
+}
+
+/*
* Lookup the MFN corresponding to a domain's GFN.
*
* There are no processor functions to do a stage 2 only lookup therefore we
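
The synchronous variant is for updates that cannot wait until the lock is released, typically when a superpage mapping is shattered and the stale superpage translation must be invalidated before the replacement table entries are written. A rough sketch under that assumption (p2m_shatter_superpage_example and the entry handling are hypothetical):

    /* Illustrative only: flush immediately while holding the write lock. */
    static void p2m_shatter_superpage_example(struct p2m_domain *p2m)
    {
        /* p2m_flush_tlb_sync() asserts this, so callers must hold the lock. */
        ASSERT(p2m_is_write_locked(p2m));

        /* ... remove the superpage entry (break) ... */

        /*
         * The old translation must be gone before the new table
         * entries become visible, so the flush cannot be deferred
         * to p2m_write_unlock().
         */
        p2m->need_flush = true;
        p2m_flush_tlb_sync(p2m);

        /* ... write the new table entry and its leaves (make) ... */
    }
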
@@ -1153,7 +1184,7 @@ static int apply_p2m_changes(struct domain *d,
out:
if ( flush )
{
- p2m_flush_tlb(&d->arch.p2m);
+ p2m_flush_tlb_sync(&d->arch.p2m);
ret = iommu_iotlb_flush(d, gfn_x(sgfn), nr);
if ( !rc )
rc = ret;
@@ -51,6 +51,17 @@ struct p2m_domain {
/* Indicate if it is required to clean the cache when writing an entry */
bool clean_pte;
+ /*
+ * P2M updates may require the TLBs to be flushed (invalidated).
+ *
+ * Flushes may be deferred by setting 'need_flush' and then flushing
+ * when the p2m write lock is released.
+ *
+ * If an immediate flush is required (e.g., if a superpage is
+ * shattered), call p2m_flush_tlb_sync().
+ */
+ bool need_flush;
+
/* Gather some statistics for information purposes only */
struct {
/* Number of mappings at each p2m tree level */
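
Putting the two options from the comment above together, the need_flush bookkeeping over a single critical section looks roughly like this (illustrative sequence, entry names hypothetical):

    p2m_write_lock(p2m);

    /* Change that needs an immediate flush (e.g. superpage shatter). */
    /* ... modify entry A ... */
    p2m->need_flush = true;
    p2m_flush_tlb_sync(p2m);    /* flushes now and clears need_flush */

    /* Change that can tolerate a stale translation until unlock. */
    /* ... modify entry B ... */
    p2m->need_flush = true;

    p2m_write_unlock(p2m);      /* sees need_flush set, flushes for B */
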