@@ -1699,7 +1699,7 @@ void context_switch(struct vcpu *prev, s
!cpumask_empty(&dirty_mask)) )
{
/* Other cpus call __sync_local_execstate from flush ipi handler. */
- flush_tlb_mask(&dirty_mask);
+ flush_mask(&dirty_mask, FLUSH_TLB | FLUSH_STATE);
}
if ( prev != next )
@@ -1808,7 +1808,7 @@ void sync_vcpu_execstate(struct vcpu *v)
sync_local_execstate();
/* Other cpus call __sync_local_execstate from flush ipi handler. */
- flush_tlb_mask(v->vcpu_dirty_cpumask);
+ flush_mask(v->vcpu_dirty_cpumask, FLUSH_TLB | FLUSH_STATE);
}
static int relinquish_memory(
@@ -207,9 +207,10 @@ void invalidate_interrupt(struct cpu_use
unsigned int flags = flush_flags;
ack_APIC_irq();
perfc_incr(ipis);
- if ( __sync_local_execstate() )
+ if ( (flags & FLUSH_STATE) && __sync_local_execstate() )
flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL);
- flush_area_local(flush_va, flags);
+ if ( flags & ~(FLUSH_STATE | FLUSH_ORDER_MASK) )
+ flush_area_local(flush_va, flags);
cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
}
@@ -219,7 +220,8 @@ void flush_area_mask(const cpumask_t *ma
ASSERT(local_irq_is_enabled());
- if ( cpumask_test_cpu(cpu, mask) )
+ if ( (flags & ~(FLUSH_STATE | FLUSH_ORDER_MASK)) &&
+ cpumask_test_cpu(cpu, mask) )
flags = flush_area_local(va, flags);
if ( (flags & ~FLUSH_ORDER_MASK) &&
@@ -101,6 +101,8 @@ void write_cr3(unsigned long cr3);
#define FLUSH_CACHE 0x400
/* VA for the flush has a valid mapping */
#define FLUSH_VA_VALID 0x800
+/* Flush CPU state */
+#define FLUSH_STATE 0x1000
/* Flush local TLBs/caches. */
unsigned int flush_area_local(const void *va, unsigned int flags);