
[4/5] KVM: x86/pmu: Reorder functions to reduce unnecessary declarations

Message ID 20230310105346.12302-5-likexu@tencent.com (mailing list archive)
State New, archived
Series KVM: x86/pmu: Hide guest counter updates from the VMRUN instruction

Commit Message

Like Xu March 10, 2023, 10:53 a.m. UTC
From: Like Xu <likexu@tencent.com>

Since more emulation work is deferred to kvm_pmu_handle_event(), move it to
the end of pmu.c so it can call the functions defined earlier in the file
directly, rather than piling up forward declarations just to keep the
compiler happy. The same motivation applies to
kvm_pmu_request_counter_reprogram(), as it is the trigger for any deferred
emulation.

No functional change intended.

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/pmu.c | 52 +++++++++++++++++++++++-----------------------
 arch/x86/kvm/pmu.h | 12 +++++------
 2 files changed, 32 insertions(+), 32 deletions(-)
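
For context, a minimal standalone sketch (plain C, not kernel code) of the
defer-then-batch flow the commit message refers to. The names here
(request_counter_reprogram, handle_event, reprogram_pmi, req_pmu) are
simplified stand-ins for kvm_pmu_request_counter_reprogram(),
kvm_pmu_handle_event(), pmu->reprogram_pmi and KVM_REQ_PMU as they appear in
the diff below; the request side only marks a counter in a bitmap and raises
a request, and the handler later walks the bitmap and reprograms each marked
counter in one batch.

#include <stdbool.h>
#include <stdio.h>

#define NR_COUNTERS 8

static unsigned long reprogram_pmi;	/* stand-in for pmu->reprogram_pmi */
static bool req_pmu;			/* stand-in for KVM_REQ_PMU */

/* Request side: only record which counter is dirty and raise the request. */
static void request_counter_reprogram(int idx)
{
	reprogram_pmi |= 1UL << idx;
	req_pmu = true;
}

/* Handler side: process all pending counters in one batch. */
static void handle_event(void)
{
	for (int idx = 0; idx < NR_COUNTERS; idx++) {
		if (!(reprogram_pmi & (1UL << idx)))
			continue;
		reprogram_pmi &= ~(1UL << idx);
		printf("reprogram counter %d\n", idx);	/* reprogram_counter() stand-in */
	}
	req_pmu = false;
}

int main(void)
{
	request_counter_reprogram(1);
	request_counter_reprogram(3);
	handle_event();		/* deferred, batched processing */
	return 0;
}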

Comments

Sean Christopherson May 24, 2023, 9:14 p.m. UTC | #1
On Fri, Mar 10, 2023, Like Xu wrote:
> From: Like Xu <likexu@tencent.com>
> 
> Since more emulation work is deferred to kvm_pmu_handle_event(), move it to
> the end of pmu.c so it can call the functions defined earlier in the file
> directly, rather than piling up forward declarations just to keep the compiler happy.

kvm_pmu_handle_event() is globally visible, so moving it around changes nothing.

As for using it in kvm_mark_pmc_is_quirky(), explicitly state the direct
motivation for moving kvm_pmu_request_counter_reprogram(), i.e. that it's being
hoisted above pmc_read_counter() for use in a future patch.  Using abstract
language might sound pretty and dramatic, but it's really not helpful for reviewers.
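
As a side note on why the hoist avoids forward declarations: the body of a
static inline defined in a header is parsed at its point of definition, so
any helper it calls must already be visible at that point. A minimal
standalone illustration (plain C, not kernel code; the names are made up):

#include <stdio.h>

/* Defined first, like the hoisted kvm_pmu_request_counter_reprogram(). */
static inline void request_reprogram(int idx)
{
	printf("defer reprogram of counter %d\n", idx);
}

/* Can now call the helper directly, with no forward declaration needed. */
static inline unsigned long read_counter(int idx)
{
	request_reprogram(idx);
	return 0;
}

int main(void)
{
	read_counter(3);
	return 0;
}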

> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index db4262fe8814..a47b579667c6 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -48,6 +48,12 @@ static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
>  	return pmu->counter_bitmask[pmc->type];
>  }
>  
> +static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
> +{
> +	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
> +	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
> +}
> +
>  static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
>  {
>  	u64 counter, enabled, running;
> @@ -183,12 +189,6 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
>  					     KVM_PMC_MAX_FIXED);
>  }
>  
> -static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
> -{
> -	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
> -	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
> -}
> -
>  static inline bool pebs_is_enabled(struct kvm_pmc *pmc)
>  {
>  	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
> -- 
> 2.39.2
>

Patch

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 81c7cc4ceadf..2a0504732966 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -467,32 +467,6 @@  static inline void kvm_pmu_handle_pmc_overflow(struct kvm_pmc *pmc)
 	pmc->prev_counter = 0;
 }
 
-void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	int bit;
-
-	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
-
-		if (unlikely(!pmc)) {
-			clear_bit(bit, pmu->reprogram_pmi);
-			continue;
-		}
-
-		reprogram_counter(pmc);
-		kvm_pmu_handle_pmc_overflow(pmc);
-	}
-
-	/*
-	 * Unused perf_events are only released if the corresponding MSRs
-	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
-	 * triggers KVM_REQ_PMU if cleanup is needed.
-	 */
-	if (unlikely(pmu->need_cleanup))
-		kvm_pmu_cleanup(vcpu);
-}
-
 /* check if idx is a valid index to access PMU */
 bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
@@ -847,3 +821,29 @@  int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	kfree(filter);
 	return r;
 }
+
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	int bit;
+
+	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
+
+		if (unlikely(!pmc)) {
+			clear_bit(bit, pmu->reprogram_pmi);
+			continue;
+		}
+
+		reprogram_counter(pmc);
+		kvm_pmu_handle_pmc_overflow(pmc);
+	}
+
+	/*
+	 * Unused perf_events are only released if the corresponding MSRs
+	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
+	 * triggers KVM_REQ_PMU if cleanup is needed.
+	 */
+	if (unlikely(pmu->need_cleanup))
+		kvm_pmu_cleanup(vcpu);
+}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index db4262fe8814..a47b579667c6 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -48,6 +48,12 @@  static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 	return pmu->counter_bitmask[pmc->type];
 }
 
+static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
+{
+	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
+	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+}
+
 static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
 {
 	u64 counter, enabled, running;
@@ -183,12 +189,6 @@  static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 					     KVM_PMC_MAX_FIXED);
 }
 
-static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
-{
-	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
-	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
-}
-
 static inline bool pebs_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);