Message ID | 20240801045907.4010984-44-mizhang@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | Mediated Passthrough vPMU 3.0 for x86 |
On 7/31/2024 9:58 PM, Mingwei Zhang wrote:
> Introduce a PMU operator for setting counter overflow. When emulating
> counter increment, multiple counters could overflow at the same time,
> i.e., during the execution of the same instruction. In the passthrough
> PMU, having a PMU operator makes it convenient to update the PMU global
> status in one shot, with the details hidden behind the vendor-specific
> implementation.

Since neither Intel nor AMD implements this API, this patch should be dropped.

> Signed-off-by: Mingwei Zhang <mizhang@google.com>
> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
> ---
>  arch/x86/include/asm/kvm-x86-pmu-ops.h | 1 +
>  arch/x86/kvm/pmu.h                     | 1 +
>  arch/x86/kvm/vmx/pmu_intel.c           | 5 +++++
>  3 files changed, 7 insertions(+)
> [...]
On 10/26/2024 12:16 AM, Chen, Zide wrote:
> On 7/31/2024 9:58 PM, Mingwei Zhang wrote:
>> Introduce a PMU operator for setting counter overflow. When emulating
>> counter increment, multiple counters could overflow at the same time,
>> i.e., during the execution of the same instruction. In the passthrough
>> PMU, having a PMU operator makes it convenient to update the PMU global
>> status in one shot, with the details hidden behind the vendor-specific
>> implementation.
>
> Since neither Intel nor AMD implements this API, this patch should be dropped.

Oh, yes.

> [...]
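Since the series never fills in the stub, here is a rough sketch of what a non-empty Intel implementation could have looked like, following the "one shot" batching described in the commit message. This is purely illustrative, not code from the series: the per-counter `emulated_overflow` flag (assumed to be set earlier by the `incr_counter()` callback) does not exist in the series or in KVM.

```c
/*
 * Hypothetical sketch, not code from the series: fold every counter that
 * overflowed while emulating a single instruction into the guest's global
 * status in one pass. The pmc->emulated_overflow flag is an assumption
 * made for illustration only.
 */
static void intel_set_overflow(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 status = 0;
	int i;

	/* General-purpose counters report overflow in bits 0..N-1. */
	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
		if (pmu->gp_counters[i].emulated_overflow)
			status |= BIT_ULL(i);

	/* Fixed counters report overflow starting at bit INTEL_PMC_IDX_FIXED (32). */
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		if (pmu->fixed_counters[i].emulated_overflow)
			status |= BIT_ULL(INTEL_PMC_IDX_FIXED + i);

	/* Publish all pending overflows to global status in one shot. */
	pmu->global_status |= status;
}
```

The batching is the whole point of the hook: updating `global_status` once per emulated instruction, rather than once per counter, keeps the guest-visible MSR state consistent even when several counters wrap during the same instruction.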
```diff
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index 72ca78df8d2b..bd5b118a5ce5 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -28,6 +28,7 @@ KVM_X86_PMU_OP_OPTIONAL(passthrough_pmu_msrs)
 KVM_X86_PMU_OP_OPTIONAL(save_pmu_context)
 KVM_X86_PMU_OP_OPTIONAL(restore_pmu_context)
 KVM_X86_PMU_OP_OPTIONAL(incr_counter)
+KVM_X86_PMU_OP_OPTIONAL(set_overflow)
 
 #undef KVM_X86_PMU_OP
 #undef KVM_X86_PMU_OP_OPTIONAL
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 325f17673a00..78a7f0c5f3ba 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -45,6 +45,7 @@ struct kvm_pmu_ops {
 	void (*save_pmu_context)(struct kvm_vcpu *vcpu);
 	void (*restore_pmu_context)(struct kvm_vcpu *vcpu);
 	bool (*incr_counter)(struct kvm_pmc *pmc);
+	void (*set_overflow)(struct kvm_vcpu *vcpu);
 
 	const u64 EVENTSEL_EVENT;
 	const int MAX_NR_GP_COUNTERS;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 42af2404bdb9..2d46c911f0b7 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -881,6 +881,10 @@ static void intel_restore_guest_pmu_context(struct kvm_vcpu *vcpu)
 	wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, pmu->fixed_ctr_ctrl_hw);
 }
 
+static void intel_set_overflow(struct kvm_vcpu *vcpu)
+{
+}
+
 struct kvm_pmu_ops intel_pmu_ops __initdata = {
 	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
 	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
@@ -897,6 +901,7 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
 	.save_pmu_context = intel_save_guest_pmu_context,
 	.restore_pmu_context = intel_restore_guest_pmu_context,
 	.incr_counter = intel_incr_counter,
+	.set_overflow = intel_set_overflow,
 	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
 	.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
 	.MIN_NR_GP_COUNTERS = 1,
```
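The diff only declares `set_overflow` as an optional op and wires up an empty Intel stub; no caller is added, which is the basis of the review comment above. For completeness, a plausible dispatch from common code would go through the conditional static call that KVM generates for `KVM_X86_PMU_OP_OPTIONAL` entries. The wrapper name below is invented for illustration and does not appear in the series:

```c
/*
 * Hypothetical call site in arch/x86/kvm/pmu.c, invented for illustration.
 * KVM_X86_PMU_OP_OPTIONAL(set_overflow) generates the static call used
 * below; static_call_cond() is a no-op when the op is left NULL, which is
 * why neither vendor would be forced to implement the hook.
 */
static void kvm_pmu_set_overflow(struct kvm_vcpu *vcpu)
{
	static_call_cond(kvm_x86_pmu_set_overflow)(vcpu);
}
```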