
[V5,0/4] Consolidated KVM vPMU support for x86

Message ID 558431BB.9090107@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini June 19, 2015, 3:14 p.m. UTC
While the code looks great, the splitting of patches 1 and 2
left something to be desired.  I've redone the split altogether
and I'll place it soon in kvm/queue because I needed to do it myself
to figure out some suggestions; and after 4 hours it would have been
pointless for you to do the work again. :)  Nevertheless, do take some
time to review the individual patches in order to understand the point of
splitting them this way.

Here is the new structuring of the patch series:

* KVM: x86/vPMU: rename a few PMU functions
* KVM: x86/vPMU: introduce pmu.h header
* KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
* KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
* KVM: x86/vPMU: reorder PMU functions
* KVM: x86/vPMU: introduce kvm_pmu_msr_idx_to_pmc
* KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
* KVM: x86/vPMU: Implement AMD vPMU code for KVM
* KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs

The code changes compared to your patches are very minor, mostly
undoing parts of patch 1:



Note how, by splitting patches, I was able to find:

* some coding style violations (global_ctrl_changed, intel_find_fixed_event)

* a function naming inconsistency (pmc_is_enabled calls pmu_ops->pmc_enabled)

* some missed conversions to pmc_to_pmu

* the unnecessary changes in patch 1 I already mentioned

Interestingly, after the split git no longer shows
the "big" patch as rewriting pmu.c; the diffstat for that file
is a pretty tame

 1 file changed, 47 insertions(+), 336 deletions(-)

where most of the insertions are the nice new header comment.
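
For reference, one way to check a per-file diffstat like this on a local
branch with the series applied (the commit reference below is only a
placeholder) is:

    git show --stat <commit> -- arch/x86/kvm/pmu.c        # plain diffstat
    git show -B --stat <commit> -- arch/x86/kvm/pmu.c     # with complete-rewrite detection

With -B, git breaks changes it considers a complete rewrite into a full
deletion plus creation, which is what makes a patch show up as "rewriting"
a file.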

The trick is to treat these kinds of style changes/refactorings as
a warning sign that you should be committing something soon.  You
can develop a git workflow, based for example on "git add -p" and
possibly "git stash", that lets you commit them quickly without
losing your train of thought.  Just get them
out of the way; you can later group them or otherwise reorder
them as you see fit.
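
As a minimal sketch of such a workflow (the file name, commit message
and stash label below are only examples):

    # stage just the hunks that are pure cleanups, leaving the rest in the tree
    git add -p arch/x86/kvm/pmu.c
    git commit -m "KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code"

    # if needed, shelve the remaining work in progress and restore it later
    git stash push -m "wip: vPMU rework"
    git stash pop

The cleanup commits can then be grouped or reordered afterwards with
"git rebase -i".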

For other examples of splitting patches, check out my SMM series
and Xiao's patches in kvm/next.

Paolo

Comments

Wei Huang June 19, 2015, 3:40 p.m. UTC | #1
I understand that the original patch 2 was huge. Thanks for taking care
of it (and for the tips). I reviewed the diff below and it looks reasonable.

-Wei

On 06/19/2015 10:14 AM, Paolo Bonzini wrote:
> While the code looks great, the splitting of patches 1 and 2
> left something to be desired.  I've redone the split altogether
> and I'll place it soon in kvm/queue because I needed to do it myself
> to figure out some suggestions; and after 4 hours it would have been
> pointless for you to do the work again. :)  Nevertheless, do take some
> time to review the individual patches in order to understand the point of
> splitting them this way.
> 
> Here is the new structuring of the patch series:
> 
> * KVM: x86/vPMU: rename a few PMU functions
> * KVM: x86/vPMU: introduce pmu.h header
> * KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
> * KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
> * KVM: x86/vPMU: reorder PMU functions
> * KVM: x86/vPMU: introduce kvm_pmu_msr_idx_to_pmc
> * KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
> * KVM: x86/vPMU: Implement AMD vPMU code for KVM
> * KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
> 
> The code changes compared to your patches are very minor, mostly
> undoing parts of patch 1:
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b68b9ac847c4..5a2b4508be44 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -304,12 +304,6 @@ struct kvm_mmu {
>  	u64 pdptrs[4]; /* pae */
>  };
>  
> -struct msr_data {
> -	bool host_initiated;
> -	u32 index;
> -	u64 data;
> -};
> -
>  enum pmc_type {
>  	KVM_PMC_GP = 0,
>  	KVM_PMC_FIXED,
> @@ -324,12 +318,6 @@ struct kvm_pmc {
>  	struct kvm_vcpu *vcpu;
>  };
>  
> -struct kvm_event_hw_type_mapping {
> -	u8 eventsel;
> -	u8 unit_mask;
> -	unsigned event_type;
> -};
> -
>  struct kvm_pmu {
>  	unsigned nr_arch_gp_counters;
>  	unsigned nr_arch_fixed_counters;
> @@ -348,21 +336,7 @@ struct kvm_pmu {
>  	u64 reprogram_pmi;
>  };
>  
> -struct kvm_pmu_ops {
> -	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
> -				    u8 unit_mask);
> -	unsigned (*find_fixed_event)(int idx);
> -	bool (*pmc_enabled)(struct kvm_pmc *pmc);
> -	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
> -	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
> -	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
> -	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
> -	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
> -	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
> -	void (*refresh)(struct kvm_vcpu *vcpu);
> -	void (*init)(struct kvm_vcpu *vcpu);
> -	void (*reset)(struct kvm_vcpu *vcpu);
> -};
> +struct kvm_pmu_ops;
>  
>  enum {
>  	KVM_DEBUGREG_BP_ENABLED = 1,
> @@ -725,6 +699,12 @@ struct kvm_vcpu_stat {
>  
>  struct x86_instruction_info;
>  
> +struct msr_data {
> +	bool host_initiated;
> +	u32 index;
> +	u64 data;
> +};
> +
>  struct kvm_lapic_irq {
>  	u32 vector;
>  	u16 delivery_mode;
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index ebb5888e9d10..31aa2c85dc97 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -10,7 +10,9 @@
>   *
>   * This work is licensed under the terms of the GNU GPL, version 2.  See
>   * the COPYING file in the top-level directory.
> + *
>   */
> +
>  #include <linux/types.h>
>  #include <linux/kvm_host.h>
>  #include <linux/perf_event.h>
> @@ -88,7 +90,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
>  		 * NMI context. Do it from irq work instead.
>  		 */
>  		if (!kvm_is_in_guest())
> -			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
> +			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
>  		else
>  			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
>  	}
> @@ -128,7 +130,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
>  	}
>  
>  	pmc->perf_event = event;
> -	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
> +	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
>  }
>  
>  void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
> @@ -154,7 +156,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
>  			  ARCH_PERFMON_EVENTSEL_CMASK |
>  			  HSW_IN_TX |
>  			  HSW_IN_TX_CHECKPOINTED))) {
> -		config = kvm_x86_ops->pmu_ops->find_arch_event(&pmc->vcpu->arch.pmu,
> +		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
>  						      event_select,
>  						      unit_mask);
>  		if (config != PERF_COUNT_HW_MAX)
> @@ -305,4 +307,3 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
>  {
>  	kvm_pmu_reset(vcpu);
>  }
> -
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index a19a0d5fdb07..f96e1f962587 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -5,9 +5,31 @@
>  #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
>  #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
>  
> -/* retrieve the control fields of IA32_FIXED_CTR_CTRL */
> +/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
>  #define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
>  
> +struct kvm_event_hw_type_mapping {
> +	u8 eventsel;
> +	u8 unit_mask;
> +	unsigned event_type;
> +};
> +
> +struct kvm_pmu_ops {
> +	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
> +				    u8 unit_mask);
> +	unsigned (*find_fixed_event)(int idx);
> +	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
> +	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
> +	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
> +	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
> +	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
> +	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
> +	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
> +	void (*refresh)(struct kvm_vcpu *vcpu);
> +	void (*init)(struct kvm_vcpu *vcpu);
> +	void (*reset)(struct kvm_vcpu *vcpu);
> +};
> +
>  static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
>  {
>  	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
> @@ -48,7 +70,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
>  
>  static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
>  {
> -	return kvm_x86_ops->pmu_ops->pmc_enabled(pmc);
> +	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
>  }
>  
>  /* returns general purpose PMC with the specified MSR. Note that it can be
> diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
> index ff5cea79a104..886aa25a7131 100644
> --- a/arch/x86/kvm/pmu_amd.c
> +++ b/arch/x86/kvm/pmu_amd.c
> @@ -57,7 +57,7 @@ static unsigned amd_find_fixed_event(int idx)
>  /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
>   * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
>   */
> -static bool amd_pmc_enabled(struct kvm_pmc *pmc)
> +static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
>  {
>  	return true;
>  }
> @@ -194,7 +194,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
>  struct kvm_pmu_ops amd_pmu_ops = {
>  	.find_arch_event = amd_find_arch_event,
>  	.find_fixed_event = amd_find_fixed_event,
> -	.pmc_enabled = amd_pmc_enabled,
> +	.pmc_is_enabled = amd_pmc_is_enabled,
>  	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
>  	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
>  	.is_valid_msr_idx = amd_is_valid_msr_idx,
> diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
> index 51b4729493db..ab38af4f4947 100644
> --- a/arch/x86/kvm/pmu_intel.c
> +++ b/arch/x86/kvm/pmu_intel.c
> @@ -63,9 +63,8 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
>  
>  	pmu->global_ctrl = data;
>  
> -	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
> +	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
>  		reprogram_counter(pmu, bit);
> -	}
>  }
>  
>  static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
> @@ -88,7 +87,6 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
>  
>  static unsigned intel_find_fixed_event(int idx)
>  {
> -
>  	if (idx >= ARRAY_SIZE(fixed_pmc_events))
>  		return PERF_COUNT_HW_MAX;
>  
> @@ -96,7 +94,7 @@ static unsigned intel_find_fixed_event(int idx)
>  }
>  
>  /* check if a PMC is enabled by comparising it with globl_ctrl bits. */
> -static bool intel_pmc_enabled(struct kvm_pmc *pmc)
> +static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
>  {
>  	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
>  
> @@ -347,7 +345,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
>  struct kvm_pmu_ops intel_pmu_ops = {
>  	.find_arch_event = intel_find_arch_event,
>  	.find_fixed_event = intel_find_fixed_event,
> -	.pmc_enabled = intel_pmc_enabled,
> +	.pmc_is_enabled = intel_pmc_is_enabled,
>  	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
>  	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
>  	.is_valid_msr_idx = intel_is_valid_msr_idx,
> 
> 
> Note how, by splitting patches, I was able to find:
> 
> * some coding style violations (global_ctrl_changed, intel_find_fixed_event)
> 
> * a function naming inconsistency (pmc_is_enabled calls pmu_ops->pmc_enabled)
> 
> * some missed conversions to pmc_to_pmu
> 
> * the unnecessary changes in patch 1 I already mentioned
> 
> Interestingly, after the split git no longer shows
> the "big" patch as rewriting pmu.c; the diffstat for that file
> is a pretty tame
> 
>  1 file changed, 47 insertions(+), 336 deletions(-)
> 
> where most of the insertions are the nice new header comment.
> 
> The trick is to treat these kinds of style changes/refactorings as
> a warning sign that you should be committing something soon.  You
> can develop a git workflow, based for example on "git add -p" and
> possibly "git stash", that lets you commit them quickly without
> losing your train of thought.  Just get them
> out of the way; you can later group them or otherwise reorder
> them as you see fit.
> 
> For other examples of splitting patches, check out my SMM series
> and Xiao's patches in kvm/next.
> 
> Paolo
> 

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b68b9ac847c4..5a2b4508be44 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -304,12 +304,6 @@  struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
-struct msr_data {
-	bool host_initiated;
-	u32 index;
-	u64 data;
-};
-
 enum pmc_type {
 	KVM_PMC_GP = 0,
 	KVM_PMC_FIXED,
@@ -324,12 +318,6 @@  struct kvm_pmc {
 	struct kvm_vcpu *vcpu;
 };
 
-struct kvm_event_hw_type_mapping {
-	u8 eventsel;
-	u8 unit_mask;
-	unsigned event_type;
-};
-
 struct kvm_pmu {
 	unsigned nr_arch_gp_counters;
 	unsigned nr_arch_fixed_counters;
@@ -348,21 +336,7 @@  struct kvm_pmu {
 	u64 reprogram_pmi;
 };
 
-struct kvm_pmu_ops {
-	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-				    u8 unit_mask);
-	unsigned (*find_fixed_event)(int idx);
-	bool (*pmc_enabled)(struct kvm_pmc *pmc);
-	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
-	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
-	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
-	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
-	void (*refresh)(struct kvm_vcpu *vcpu);
-	void (*init)(struct kvm_vcpu *vcpu);
-	void (*reset)(struct kvm_vcpu *vcpu);
-};
+struct kvm_pmu_ops;
 
 enum {
 	KVM_DEBUGREG_BP_ENABLED = 1,
@@ -725,6 +699,12 @@  struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
+struct msr_data {
+	bool host_initiated;
+	u32 index;
+	u64 data;
+};
+
 struct kvm_lapic_irq {
 	u32 vector;
 	u16 delivery_mode;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index ebb5888e9d10..31aa2c85dc97 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -10,7 +10,9 @@ 
  *
  * This work is licensed under the terms of the GNU GPL, version 2.  See
  * the COPYING file in the top-level directory.
+ *
  */
+
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
@@ -88,7 +90,7 @@  static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 		 * NMI context. Do it from irq work instead.
 		 */
 		if (!kvm_is_in_guest())
-			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
 		else
 			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
 	}
@@ -128,7 +130,7 @@  static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
 	}
 
 	pmc->perf_event = event;
-	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
 }
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
@@ -154,7 +156,7 @@  void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 			  ARCH_PERFMON_EVENTSEL_CMASK |
 			  HSW_IN_TX |
 			  HSW_IN_TX_CHECKPOINTED))) {
-		config = kvm_x86_ops->pmu_ops->find_arch_event(&pmc->vcpu->arch.pmu,
+		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
 						      event_select,
 						      unit_mask);
 		if (config != PERF_COUNT_HW_MAX)
@@ -305,4 +307,3 @@  void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_pmu_reset(vcpu);
 }
-
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index a19a0d5fdb07..f96e1f962587 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -5,9 +5,31 @@ 
 #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
 #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
 
-/* retrieve the control fields of IA32_FIXED_CTR_CTRL */
+/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
 #define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
 
+struct kvm_event_hw_type_mapping {
+	u8 eventsel;
+	u8 unit_mask;
+	unsigned event_type;
+};
+
+struct kvm_pmu_ops {
+	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
+				    u8 unit_mask);
+	unsigned (*find_fixed_event)(int idx);
+	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
+	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
+	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
+	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
+	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
+	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+	void (*refresh)(struct kvm_vcpu *vcpu);
+	void (*init)(struct kvm_vcpu *vcpu);
+	void (*reset)(struct kvm_vcpu *vcpu);
+};
+
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -48,7 +70,7 @@  static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
 
 static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 {
-	return kvm_x86_ops->pmu_ops->pmc_enabled(pmc);
+	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
 }
 
 /* returns general purpose PMC with the specified MSR. Note that it can be
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index ff5cea79a104..886aa25a7131 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -57,7 +57,7 @@  static unsigned amd_find_fixed_event(int idx)
 /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
  * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
  */
-static bool amd_pmc_enabled(struct kvm_pmc *pmc)
+static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 {
 	return true;
 }
@@ -194,7 +194,7 @@  static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 struct kvm_pmu_ops amd_pmu_ops = {
 	.find_arch_event = amd_find_arch_event,
 	.find_fixed_event = amd_find_fixed_event,
-	.pmc_enabled = amd_pmc_enabled,
+	.pmc_is_enabled = amd_pmc_is_enabled,
 	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
 	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
 	.is_valid_msr_idx = amd_is_valid_msr_idx,
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 51b4729493db..ab38af4f4947 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -63,9 +63,8 @@  static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 
 	pmu->global_ctrl = data;
 
-	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
+	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
 		reprogram_counter(pmu, bit);
-	}
 }
 
 static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
@@ -88,7 +87,6 @@  static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
 
 static unsigned intel_find_fixed_event(int idx)
 {
-
 	if (idx >= ARRAY_SIZE(fixed_pmc_events))
 		return PERF_COUNT_HW_MAX;
 
@@ -96,7 +94,7 @@  static unsigned intel_find_fixed_event(int idx)
 }
 
 /* check if a PMC is enabled by comparising it with globl_ctrl bits. */
-static bool intel_pmc_enabled(struct kvm_pmc *pmc)
+static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
@@ -347,7 +345,7 @@  static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 struct kvm_pmu_ops intel_pmu_ops = {
 	.find_arch_event = intel_find_arch_event,
 	.find_fixed_event = intel_find_fixed_event,
-	.pmc_enabled = intel_pmc_enabled,
+	.pmc_is_enabled = intel_pmc_is_enabled,
 	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
 	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
 	.is_valid_msr_idx = intel_is_valid_msr_idx,