
[XEN,v4,11/14] x86/vpmu: guard calls to vmx/svm functions

Message ID 81999eb47b590afe3b269c33f7d166c9882cafdc.1720501197.git.Sergiy_Kibrik@epam.com (mailing list archive)
State Superseded
Series x86: make CPU virtualisation support configurable

Commit Message

Sergiy Kibrik July 9, 2024, 6:07 a.m. UTC
If VMX/SVM is disabled in the build, we may still want to have the vPMU drivers for
PV guests. Yet in that case, before using VMX/SVM features and functions, we have
to explicitly check whether they are available in the build. For this purpose
(and also to keep the conditionals simple) two helpers are introduced,
is_{vmx,svm}_vcpu(v), which check both the HVM and the VMX/SVM condition at the
same time; they replace the is_hvm_vcpu(v) macro where needed.

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
changes in v4:
 - use IS_ENABLED(CONFIG_{VMX,SVM}) instead of using_{vmx,svm}
 - fix typo
changes in v3:
 - introduced macro is_{vmx,svm}_vcpu(v)
 - changed description
 - reordered patch, do not modify conditionals w/ cpu_has_vmx_msr_bitmap check
---
 xen/arch/x86/cpu/vpmu_amd.c   |  9 +++++----
 xen/arch/x86/cpu/vpmu_intel.c | 16 +++++++++-------
 2 files changed, 14 insertions(+), 11 deletions(-)
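
As an illustration of the mechanism the description relies on, here is a minimal
standalone sketch (not part of the patch; CONFIG_DEMO_VMX, struct vcpu and the
vmx_read_guest_msr() stub below are stand-ins): because an IS_ENABLED()-style
test is a compile-time constant, the compiler folds the guard and can discard the
branch that references the VMX-only function in a build with VMX disabled.

    /* Standalone sketch only -- not the actual Xen code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_DEMO_VMX 0   /* pretend VMX support is compiled out */

    struct vcpu { bool hvm; };

    #define is_hvm_vcpu(v) ((v)->hvm)
    #define is_vmx_vcpu(v) (CONFIG_DEMO_VMX && is_hvm_vcpu(v))

    static void vmx_read_guest_msr(const struct vcpu *v)
    {
        /* Would only have a real implementation in a VMX-enabled build. */
        (void)v;
        puts("VMX path");
    }

    int main(void)
    {
        struct vcpu v = { .hvm = true };

        if ( is_vmx_vcpu(&v) )   /* folds to false with CONFIG_DEMO_VMX == 0 */
            vmx_read_guest_msr(&v);
        else
            puts("PV / non-VMX path");

        return 0;
    }

In the real tree the same folding is what lets vmx_*()/svm_*() calls stay in the
source while becoming unreachable, and hence discardable, when the corresponding
support is compiled out.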

Comments

Jan Beulich July 22, 2024, 11:55 a.m. UTC | #1
On 09.07.2024 08:07, Sergiy Kibrik wrote:
> @@ -363,7 +364,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>              return 0;
>          vpmu_set(vpmu, VPMU_RUNNING);
>  
> -        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
>               amd_vpmu_set_msr_bitmap(v);
>      }

Up from here there's another is_hvm_vcpu(). I think you want to change all
of them, for consistency.

> @@ -269,7 +271,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
>      if ( !is_hvm_vcpu(v) )
>          rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
>      /* Save MSR to private context to make it fork-friendly */
> -    else if ( mem_sharing_enabled(v->domain) )
> +    else if ( is_vmx_vcpu(v) && mem_sharing_enabled(v->domain) )
>          vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                             &core2_vpmu_cxt->global_ctrl);
>  }

Why don't you change the is_hvm_vcpu() that even is visible in context?
Then this hunk would also actually follow what the description says.
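
One way to read this suggestion (a sketch, not code taken from the series):
convert the is_hvm_vcpu() that is visible in the hunk's context, so that the
else branch, and with it the vmx_read_guest_msr() call, is already guarded by
is_vmx_vcpu() and no second check is needed:

    -    if ( !is_hvm_vcpu(v) )
    +    if ( !is_vmx_vcpu(v) )
             rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
         /* Save MSR to private context to make it fork-friendly */
         else if ( mem_sharing_enabled(v->domain) )
             vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                &core2_vpmu_cxt->global_ctrl);

A PV vCPU takes the rdmsrl() path with either form, and in a VMX-enabled build
is_vmx_vcpu() is equivalent to is_hvm_vcpu(), so behaviour is unchanged; the
gain is that a build without VMX can drop the else branch (and its
vmx_read_guest_msr() reference) entirely.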

> @@ -333,7 +335,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
>          wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
>      }
>      /* Restore MSR from context when used with a fork */
> -    else if ( mem_sharing_is_fork(v->domain) )
> +    else if ( is_vmx_vcpu(v) && mem_sharing_is_fork(v->domain) )
>          vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                              core2_vpmu_cxt->global_ctrl);
>  }

Same here, and up from here there are again two more places to change
(plus at least one more further down).

Jan

Patch

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 97e6315bd9..977c2f1e73 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -27,6 +27,7 @@ 
 #define is_pmu_enabled(msr) ((msr) & (1ULL << MSR_F10H_EVNTSEL_EN_SHIFT))
 #define set_guest_mode(msr) ((msr) |= (1ULL << MSR_F10H_EVNTSEL_GO_SHIFT))
 #define is_overflowed(msr) (!((msr) & (1ULL << (MSR_F10H_COUNTER_LENGTH - 1))))
+#define is_svm_vcpu(v) (IS_ENABLED(CONFIG_SVM) && is_hvm_vcpu(v))
 
 static unsigned int __read_mostly num_counters;
 static const u32 __read_mostly *counters;
@@ -289,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v, bool to_guest)
 
     context_save(v);
 
-    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_svm_vcpu(v) &&
          is_msr_bitmap_on(vpmu) )
         amd_vpmu_unset_msr_bitmap(v);
 
@@ -363,7 +364,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return 0;
         vpmu_set(vpmu, VPMU_RUNNING);
 
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
              amd_vpmu_set_msr_bitmap(v);
     }
 
@@ -372,7 +373,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
     {
         vpmu_reset(vpmu, VPMU_RUNNING);
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
              amd_vpmu_unset_msr_bitmap(v);
         release_pmu_ownership(PMU_OWNER_HVM);
     }
@@ -415,7 +416,7 @@ static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+    if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
         amd_vpmu_unset_msr_bitmap(v);
 
     xfree(vpmu->context);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index cd414165df..db35d22aaf 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -54,6 +54,8 @@ 
 #define MSR_PMC_ALIAS_MASK       (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
 static bool __read_mostly full_width_write;
 
+#define is_vmx_vcpu(v) (IS_ENABLED(CONFIG_VMX) && is_hvm_vcpu(v))
+
 /*
  * MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
  * counters. 4 bits for every counter.
@@ -269,7 +271,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
     if ( !is_hvm_vcpu(v) )
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
     /* Save MSR to private context to make it fork-friendly */
-    else if ( mem_sharing_enabled(v->domain) )
+    else if ( is_vmx_vcpu(v) && mem_sharing_enabled(v->domain) )
         vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                            &core2_vpmu_cxt->global_ctrl);
 }
@@ -333,7 +335,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
     }
     /* Restore MSR from context when used with a fork */
-    else if ( mem_sharing_is_fork(v->domain) )
+    else if ( is_vmx_vcpu(v) && mem_sharing_is_fork(v->domain) )
         vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                             core2_vpmu_cxt->global_ctrl);
 }
@@ -442,7 +444,7 @@ static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
     if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
         return 0;
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_vmx_vcpu(v) )
     {
         if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
             goto out_err;
@@ -584,7 +586,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         if ( msr_content & fixed_ctrl_mask )
             return -EINVAL;
 
-        if ( is_hvm_vcpu(v) )
+        if ( is_vmx_vcpu(v) )
             vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                &core2_vpmu_cxt->global_ctrl);
         else
@@ -653,7 +655,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             if ( blocked )
                 return -EINVAL;
 
-            if ( is_hvm_vcpu(v) )
+            if ( is_vmx_vcpu(v) )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                    &core2_vpmu_cxt->global_ctrl);
             else
@@ -672,7 +674,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         wrmsrl(msr, msr_content);
     else
     {
-        if ( is_hvm_vcpu(v) )
+        if ( is_vmx_vcpu(v) )
             vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
         else
             wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
@@ -706,7 +708,7 @@ static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             *msr_content = core2_vpmu_cxt->global_status;
             break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
-            if ( is_hvm_vcpu(v) )
+            if ( is_vmx_vcpu(v) )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
             else
                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);