@@ -537,6 +537,8 @@ void hvm_do_resume(struct vcpu *v)
v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
}
 
+ __cpumask_set_cpu(v->processor, v->arch.hvm.cache_dirty_mask);
+
if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
{
struct x86_event info;
@@ -1592,6 +1594,10 @@ int hvm_vcpu_initialise(struct vcpu *v)
if ( rc )
goto fail6;
 
+ rc = -ENOMEM;
+ if ( !zalloc_cpumask_var(&v->arch.hvm.cache_dirty_mask) )
+ goto fail6;
+
rc = ioreq_server_add_vcpu_all(d, v);
if ( rc != 0 )
goto fail6;
@@ -1621,6 +1627,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
hvm_vcpu_cacheattr_destroy(v);
fail1:
viridian_vcpu_deinit(v);
+ FREE_CPUMASK_VAR(v->arch.hvm.cache_dirty_mask);
return rc;
}
@@ -1628,6 +1635,8 @@ void hvm_vcpu_destroy(struct vcpu *v)
{
viridian_vcpu_deinit(v);
 
+ FREE_CPUMASK_VAR(v->arch.hvm.cache_dirty_mask);
+
ioreq_server_remove_vcpu_all(v->domain, v);
if ( hvm_altp2m_supported() )
@@ -2363,8 +2363,14 @@ static void svm_vmexit_mce_intercept(
static void cf_check svm_wbinvd_intercept(void)
{
- if ( cache_flush_permitted(current->domain) )
- flush_all(FLUSH_CACHE_WRITEBACK);
+ struct vcpu *curr = current;
+
+ if ( !cache_flush_permitted(curr->domain) )
+ return;
+
+ flush_mask(curr->arch.hvm.cache_dirty_mask, FLUSH_CACHE_WRITEBACK);
+ cpumask_copy(curr->arch.hvm.cache_dirty_mask,
+ cpumask_of(curr->processor));
}
 
static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs,
@@ -3710,11 +3710,17 @@ static void vmx_do_extint(struct cpu_use
static void cf_check vmx_wbinvd_intercept(void)
{
- if ( !cache_flush_permitted(current->domain) || iommu_snoop )
+ struct vcpu *curr = current;
+
+ if ( !cache_flush_permitted(curr->domain) || iommu_snoop )
return;
 
if ( cpu_has_wbinvd_exiting )
- flush_all(FLUSH_CACHE_WRITEBACK);
+ {
+ flush_mask(curr->arch.hvm.cache_dirty_mask, FLUSH_CACHE_WRITEBACK);
+ cpumask_copy(curr->arch.hvm.cache_dirty_mask,
+ cpumask_of(curr->processor));
+ }
else
wbnoinvd();
}
@@ -161,6 +161,8 @@ struct hvm_vcpu {
struct svm_vcpu svm;
};
 
+ cpumask_var_t cache_dirty_mask;
+
struct tasklet assert_evtchn_irq_tasklet;
struct nestedvcpu nvcpu;
There's no need to write back caches on all CPUs upon seeing a WBINVD exit; ones that a vCPU hasn't run on since the last writeback (or since it was started) can't hold data which may need writing back.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
With us not running AMD IOMMUs in non-coherent ways, I wonder whether svm_wbinvd_intercept() really needs to do anything (or whether it couldn't check iommu_snoop just like VMX does, knowing that as of c609108b2190 ["x86/shadow: make iommu_snoop usage consistent with HAP's"] that's always set). This would largely serve as grep fodder then, to make sure this code is updated once / when we do away with this global variable, and it would be the penultimate step to being able to fold SVM's and VT-x'es functions.
---
v2: Re-base over changes earlier in the series.
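
For illustration only (not part of the patch): below is a minimal, self-contained C sketch of the tracking scheme described above, using a plain unsigned long bitmask and stub helpers in place of Xen's cpumask_var_t / flush_mask() machinery. The struct vcpu, vcpu_mark_running(), flush_cpu(), and wbinvd_intercept() names here are hypothetical stand-ins, not Xen interfaces.

/* Illustrative sketch only -- not Xen code.  It models the idea that a vCPU
 * only needs to write back caches on the pCPUs it has run on since the last
 * writeback (or since it was started). */
#include <stdio.h>

#define NR_CPUS 8

struct vcpu {
    unsigned int processor;     /* pCPU the vCPU currently runs on */
    unsigned long cache_dirty;  /* bit n set => caches on pCPU n may hold its data */
};

/* Stand-in for a per-CPU cache writeback. */
static void flush_cpu(unsigned int cpu)
{
    printf("write back caches on pCPU %u\n", cpu);
}

/* Called whenever the vCPU is about to (re)enter the guest on its current
 * pCPU -- the role played by the hvm_do_resume() hunk above. */
static void vcpu_mark_running(struct vcpu *v)
{
    v->cache_dirty |= 1UL << v->processor;
}

/* WBINVD intercept: flush only the pCPUs recorded as dirty, then reset the
 * mask to just the pCPU the vCPU is running on right now. */
static void wbinvd_intercept(struct vcpu *v)
{
    unsigned int cpu;

    for ( cpu = 0; cpu < NR_CPUS; cpu++ )
        if ( v->cache_dirty & (1UL << cpu) )
            flush_cpu(cpu);

    v->cache_dirty = 1UL << v->processor;
}

int main(void)
{
    struct vcpu v = { .processor = 2 };

    vcpu_mark_running(&v);   /* ran on pCPU 2 */
    v.processor = 5;
    vcpu_mark_running(&v);   /* migrated to pCPU 5 */

    wbinvd_intercept(&v);    /* flushes only pCPUs 2 and 5 */
    wbinvd_intercept(&v);    /* now only pCPU 5 is considered dirty */

    return 0;
}

In the patch itself, hvm_do_resume() records the current pCPU in cache_dirty_mask, while svm_wbinvd_intercept() and vmx_wbinvd_intercept() perform the flush-and-reset step using flush_mask() and cpumask_copy().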