@@ -626,6 +626,43 @@ static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
#define nested_vmx_merge_msr_bitmaps_rw(msr) \
	nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_RW)
+/*
+ * Disable interception of PMU MSRs for the nested VM (L2) when both
+ * L0 and L1 use the mediated vPMU.
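+ *
+ * The nested_vmx_merge_msr_bitmaps*() macros used below reference the
+ * local vmx, msr_bitmap_l1 and msr_bitmap_l0 variables by name; this is
+ * why vmx is declared even though it otherwise appears unused.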
+ */
+static void nested_vmx_merge_pmu_msr_bitmaps(struct kvm_vcpu *vcpu,
+					     unsigned long *msr_bitmap_l1,
+					     unsigned long *msr_bitmap_l0)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int i;
+
+	if (!kvm_mediated_pmu_enabled(vcpu))
+		return;
+
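+	/* GP event selectors, legacy counters and full-width PMC aliases. */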
+	for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+		nested_vmx_merge_msr_bitmaps_rw(MSR_ARCH_PERFMON_EVENTSEL0 + i);
+		nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PERFCTR0 + i);
+		nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_PMC0 + i);
+	}
+
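+	/* Fixed-function counters. */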
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+		nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_FIXED_CTR0 + i);
+
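+	/* Control MSRs; GLOBAL_STATUS reads only, GLOBAL_OVF_CTRL writes only. */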
+	nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_FIXED_CTR_CTRL);
+	nested_vmx_merge_msr_bitmaps_rw(MSR_CORE_PERF_GLOBAL_CTRL);
+	nested_vmx_merge_msr_bitmaps_read(MSR_CORE_PERF_GLOBAL_STATUS);
+	nested_vmx_merge_msr_bitmaps_write(MSR_CORE_PERF_GLOBAL_OVF_CTRL);
+}
+
/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
@@ -717,6 +754,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
	nested_vmx_merge_msr_bitmaps_write(MSR_IA32_PRED_CMD);
	nested_vmx_merge_msr_bitmaps_write(MSR_IA32_FLUSH_CMD);
+	nested_vmx_merge_pmu_msr_bitmaps(vcpu, msr_bitmap_l1, msr_bitmap_l0);
+
	kvm_vcpu_unmap(vcpu, &map);
	vmx->nested.force_msr_bitmap_recalc = false;