@@ -25,6 +25,8 @@ KVM_X86_PMU_OP_OPTIONAL(reset)
KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
KVM_X86_PMU_OP_OPTIONAL(cleanup)
KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
+KVM_X86_PMU_OP_OPTIONAL(save_pmu_context)
+KVM_X86_PMU_OP_OPTIONAL(restore_pmu_context)

#undef KVM_X86_PMU_OP
#undef KVM_X86_PMU_OP_OPTIONAL
@@ -1065,3 +1065,17 @@ void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu)
{
static_call_cond(kvm_x86_pmu_passthrough_pmu_msrs)(vcpu);
}
+
+void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+
+ static_call_cond(kvm_x86_pmu_save_pmu_context)(vcpu);
+}
+
+void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_irqs_disabled();
+
+ static_call_cond(kvm_x86_pmu_restore_pmu_context)(vcpu);
+}
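
The lockdep_assert_irqs_disabled() calls make the calling convention explicit: both helpers are meant to be invoked from an interrupts-disabled section of the vCPU run path. The actual call sites are not part of this hunk; as a rough, purely illustrative sketch (the wrapper function below and the assumed save/restore direction are hypothetical, not taken from the patch):

/*
 * Illustrative sketch only -- not part of the patch.  Assumes
 * "restore" loads the guest's PMU state into hardware before the
 * guest runs and "save" moves it back out afterwards.
 */
static void example_run_with_guest_pmu(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_pmu_restore_pmu_context(vcpu);	/* load guest PMU state */

	/* ... enter and exit the guest ... */

	kvm_pmu_save_pmu_context(vcpu);		/* stash guest PMU state */

	local_irq_restore(flags);
}
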
@@ -42,6 +42,8 @@ struct kvm_pmu_ops {
void (*cleanup)(struct kvm_vcpu *vcpu);
bool (*is_rdpmc_passthru_allowed)(struct kvm_vcpu *vcpu);
void (*passthrough_pmu_msrs)(struct kvm_vcpu *vcpu);
+ void (*save_pmu_context)(struct kvm_vcpu *vcpu);
+ void (*restore_pmu_context)(struct kvm_vcpu *vcpu);

const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
@@ -294,6 +296,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
bool kvm_pmu_check_rdpmc_passthrough(struct kvm_vcpu *vcpu);
void kvm_pmu_passthrough_pmu_msrs(struct kvm_vcpu *vcpu);
+void kvm_pmu_save_pmu_context(struct kvm_vcpu *vcpu);
+void kvm_pmu_restore_pmu_context(struct kvm_vcpu *vcpu);

bool is_vmware_backdoor_pmc(u32 pmc_idx);
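
A vendor backend opts into the two new optional hooks by filling the corresponding kvm_pmu_ops members; the MSR-level save/restore logic lives there. A minimal sketch for the Intel side, with hypothetical callback names and empty bodies (the real vendor implementation is a separate patch), could look like:

/* Hypothetical vmx/pmu_intel.c fragment, for illustration only. */
static void intel_save_pmu_context(struct kvm_vcpu *vcpu)
{
	/* e.g. clear global ctrl, then save GP/fixed counters and selectors */
}

static void intel_restore_pmu_context(struct kvm_vcpu *vcpu)
{
	/* e.g. reload counters and event selectors, then re-enable global ctrl */
}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
	/* ... existing callbacks elided ... */
	.save_pmu_context = intel_save_pmu_context,
	.restore_pmu_context = intel_restore_pmu_context,
};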