Cleanup now that the kvm_pmu_ops structure was finally left completely
unused: the AMD and Intel instances initialized every field with the
same shared kvm_x86_pmu_* function, and all remaining callers invoke
those functions directly.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  3 ---
 arch/x86/kvm/pmu.h              | 20 --------------------
 arch/x86/kvm/pmu_amd.c          | 15 ---------------
 arch/x86/kvm/svm.c              |  1 -
 arch/x86/kvm/vmx/pmu_intel.c    | 15 ---------------
 arch/x86/kvm/vmx/vmx.c          |  2 --
 6 files changed, 56 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1345,9 +1345,6 @@ struct kvm_x86_ops {
gfn_t offset, unsigned long mask);
int (*write_log_dirty)(struct kvm_vcpu *vcpu);
- /* pmu operations of sub-arch */
- const struct kvm_pmu_ops *pmu_ops;
-
/*
* Architecture specific hooks for vCPU blocking due to
* HLT instruction.
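
(For context, not part of the diff: before this cleanup a PMU hook was
reached through two pointer dereferences, e.g.

	kvm_x86_ops->pmu_ops->refresh(vcpu);

whereas after the call sites were converted, the shared implementation
is called directly:

	kvm_x86_pmu_refresh(vcpu);

The call sites themselves were changed by earlier patches in the
series; this one only removes the now-dead plumbing.)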
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -36,23 +36,6 @@ extern void kvm_x86_pmu_refresh(struct kvm_vcpu *vcpu);
extern void kvm_x86_pmu_init(struct kvm_vcpu *vcpu);
extern void kvm_x86_pmu_reset(struct kvm_vcpu *vcpu);
-struct kvm_pmu_ops {
- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
- u8 unit_mask);
- unsigned (*find_fixed_event)(int idx);
- bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
- struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
- struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
- u64 *mask);
- int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
- bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
- int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
- int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
- void (*refresh)(struct kvm_vcpu *vcpu);
- void (*init)(struct kvm_vcpu *vcpu);
- void (*reset)(struct kvm_vcpu *vcpu);
-};
-
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -138,7 +121,4 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
-
-extern struct kvm_pmu_ops intel_pmu_ops;
-extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */
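
(Illustrative sketch, not part of the patch: with the ops table gone, a
wrapper in arch/x86/kvm/pmu.c links directly against the kvm_x86_pmu_*
declarations kept above, along the lines of

	void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
	{
		/* direct call, no per-vendor ops-table hop */
		kvm_x86_pmu_refresh(vcpu);
	}

kvm_pmu_refresh() is the existing wrapper name in pmu.c; the body shown
here is only a sketch of the post-cleanup shape.)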
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -300,18 +300,3 @@ void kvm_x86_pmu_reset(struct kvm_vcpu *vcpu)
pmc->counter = pmc->eventsel = 0;
}
}
-
-struct kvm_pmu_ops amd_pmu_ops = {
- .find_arch_event = kvm_x86_pmu_find_arch_event,
- .find_fixed_event = kvm_x86_pmu_find_fixed_event,
- .pmc_is_enabled = kvm_x86_pmu_pmc_is_enabled,
- .pmc_idx_to_pmc = kvm_x86_pmu_pmc_idx_to_pmc,
- .msr_idx_to_pmc = kvm_x86_pmu_msr_idx_to_pmc,
- .is_valid_msr_idx = kvm_x86_pmu_is_valid_msr_idx,
- .is_valid_msr = kvm_x86_pmu_is_valid_msr,
- .get_msr = kvm_x86_pmu_get_msr,
- .set_msr = kvm_x86_pmu_set_msr,
- .refresh = kvm_x86_pmu_refresh,
- .init = kvm_x86_pmu_init,
- .reset = kvm_x86_pmu_reset,
-};
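
(Note that this initializer was field-for-field identical to the
intel_pmu_ops one removed further down, so an indirect dispatch such as

	kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, &data);

could only ever land in kvm_x86_pmu_get_msr() on either vendor, which
is why the table is dead weight. The local variable names in this
snippet are illustrative.)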
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7290,7 +7290,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.sched_in = kvm_x86_sched_in,
- .pmu_ops = &amd_pmu_ops,
.deliver_posted_interrupt = kvm_x86_deliver_posted_interrupt,
.dy_apicv_has_pending_interrupt = kvm_x86_dy_apicv_has_pending_interrupt,
.update_pi_irte = kvm_x86_update_pi_irte,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -359,18 +359,3 @@ void kvm_x86_pmu_reset(struct kvm_vcpu *vcpu)
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu->global_ovf_ctrl = 0;
}
-
-struct kvm_pmu_ops intel_pmu_ops = {
- .find_arch_event = kvm_x86_pmu_find_arch_event,
- .find_fixed_event = kvm_x86_pmu_find_fixed_event,
- .pmc_is_enabled = kvm_x86_pmu_pmc_is_enabled,
- .pmc_idx_to_pmc = kvm_x86_pmu_pmc_idx_to_pmc,
- .msr_idx_to_pmc = kvm_x86_pmu_msr_idx_to_pmc,
- .is_valid_msr_idx = kvm_x86_pmu_is_valid_msr_idx,
- .is_valid_msr = kvm_x86_pmu_is_valid_msr,
- .get_msr = kvm_x86_pmu_get_msr,
- .set_msr = kvm_x86_pmu_set_msr,
- .refresh = kvm_x86_pmu_refresh,
- .init = kvm_x86_pmu_init,
- .reset = kvm_x86_pmu_reset,
-};
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7897,8 +7897,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.pre_block = kvm_x86_pre_block,
.post_block = kvm_x86_post_block,
- .pmu_ops = &intel_pmu_ops,
-
.update_pi_irte = kvm_x86_update_pi_irte,
#ifdef CONFIG_X86_64