[XEN,v2,08/15] x86/vpmu: guard vmx/svm calls with cpu_has_{vmx,svm}

Message ID fbd17194026a7e61bac2198e3b468d498f45d064.1715761386.git.Sergiy_Kibrik@epam.com (mailing list archive)
State Superseded
Series x86: make cpu virtualization support configurable

Commit Message

Sergiy Kibrik May 15, 2024, 9:14 a.m. UTC
If VMX/SVM is disabled in the build, we may still want to have vPMU drivers for
PV guests. Yet some calls to vmx/svm-related routines then need to be guarded.

Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
---
 xen/arch/x86/cpu/vpmu_amd.c   |  8 ++++----
 xen/arch/x86/cpu/vpmu_intel.c | 20 ++++++++++----------
 2 files changed, 14 insertions(+), 14 deletions(-)

Comments

Stefano Stabellini May 16, 2024, 12:44 a.m. UTC | #1
On Wed, 15 May 2024, Sergiy Kibrik wrote:
> If VMX/SVM is disabled in the build, we may still want to have vPMU drivers for
> PV guests. Yet some calls to vmx/svm-related routines then need to be guarded.
> 
> Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>

Question to the x86 maintainers: are we sure we want to support the case
where VMX/SVM is disabled in the build but still we want to run PV
guests with vPMU?

If the answer is no, could we simplify this by making vpmu_amd
dependent on CONFIG_SVM and vpmu_intel dependent on CONFIG_VMX?

I realize that it is possible and technically correct to disable
CONFIG_SVM (or VMX) to run on AMD hardware (or Intel) with plain PV
guests only. But do we want to support it? I wonder if we could make
things easier by not supporting this configuration until somebody
asks for it.
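In concrete terms, that alternative would amount to a build-rule change along
these lines (purely illustrative sketch; the actual Makefile layout and option
names in the tree may differ):

    # xen/arch/x86/cpu/Makefile (hypothetical variant, not what this patch does)
    obj-$(CONFIG_SVM) += vpmu_amd.o
    obj-$(CONFIG_VMX) += vpmu_intel.o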


> ---
>  xen/arch/x86/cpu/vpmu_amd.c   |  8 ++++----
>  xen/arch/x86/cpu/vpmu_intel.c | 20 ++++++++++----------
>  2 files changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
> index db2fa420e1..40b0c8932f 100644
> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -290,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
>      context_save(v);
>  
>      if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> -         is_msr_bitmap_on(vpmu) )
> +         is_msr_bitmap_on(vpmu) && cpu_has_svm )
>          amd_vpmu_unset_msr_bitmap(v);
>  
>      if ( to_guest )
> @@ -363,7 +363,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>              return 0;
>          vpmu_set(vpmu, VPMU_RUNNING);
>  
> -        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
>               amd_vpmu_set_msr_bitmap(v);
>      }
>  
> @@ -372,7 +372,7 @@ static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
>      {
>          vpmu_reset(vpmu, VPMU_RUNNING);
> -        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
>               amd_vpmu_unset_msr_bitmap(v);
>          release_pmu_ownership(PMU_OWNER_HVM);
>      }
> @@ -415,7 +415,7 @@ static void cf_check amd_vpmu_destroy(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
> -    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
>          amd_vpmu_unset_msr_bitmap(v);
>  
>      xfree(vpmu->context);
> diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
> index cd414165df..10c34a5691 100644
> --- a/xen/arch/x86/cpu/vpmu_intel.c
> +++ b/xen/arch/x86/cpu/vpmu_intel.c
> @@ -269,7 +269,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
>      if ( !is_hvm_vcpu(v) )
>          rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
>      /* Save MSR to private context to make it fork-friendly */
> -    else if ( mem_sharing_enabled(v->domain) )
> +    else if ( mem_sharing_enabled(v->domain) && cpu_has_vmx )
>          vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                             &core2_vpmu_cxt->global_ctrl);
>  }
> @@ -288,7 +288,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
>  
>      /* Unset PMU MSR bitmap to trap lazy load. */
>      if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> -         cpu_has_vmx_msr_bitmap )
> +         cpu_has_vmx && cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v);
>  
>      if ( to_guest )
> @@ -333,7 +333,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
>          wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
>      }
>      /* Restore MSR from context when used with a fork */
> -    else if ( mem_sharing_is_fork(v->domain) )
> +    else if ( mem_sharing_is_fork(v->domain) && cpu_has_vmx )
>          vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                              core2_vpmu_cxt->global_ctrl);
>  }
> @@ -442,7 +442,7 @@ static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
>      if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
>          return 0;
>  
> -    if ( is_hvm_vcpu(v) )
> +    if ( is_hvm_vcpu(v) && cpu_has_vmx )
>      {
>          if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
>              goto out_err;
> @@ -513,7 +513,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
>          __core2_vpmu_load(current);
>          vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
>  
> -        if ( is_hvm_vcpu(current) && cpu_has_vmx_msr_bitmap )
> +        if ( is_hvm_vcpu(current) && cpu_has_vmx && cpu_has_vmx_msr_bitmap )
>              core2_vpmu_set_msr_bitmap(current);
>      }
>      return 1;
> @@ -584,7 +584,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          if ( msr_content & fixed_ctrl_mask )
>              return -EINVAL;
>  
> -        if ( is_hvm_vcpu(v) )
> +        if ( is_hvm_vcpu(v) && cpu_has_vmx )
>              vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                                 &core2_vpmu_cxt->global_ctrl);
>          else
> @@ -653,7 +653,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>              if ( blocked )
>                  return -EINVAL;
>  
> -            if ( is_hvm_vcpu(v) )
> +            if ( is_hvm_vcpu(v) && cpu_has_vmx )
>                  vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
>                                     &core2_vpmu_cxt->global_ctrl);
>              else
> @@ -672,7 +672,7 @@ static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>          wrmsrl(msr, msr_content);
>      else
>      {
> -        if ( is_hvm_vcpu(v) )
> +        if ( is_hvm_vcpu(v) && cpu_has_vmx )
>              vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
>          else
>              wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> @@ -706,7 +706,7 @@ static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
>              *msr_content = core2_vpmu_cxt->global_status;
>              break;
>          case MSR_CORE_PERF_GLOBAL_CTRL:
> -            if ( is_hvm_vcpu(v) )
> +            if ( is_hvm_vcpu(v) && cpu_has_vmx )
>                  vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
>              else
>                  rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
> @@ -808,7 +808,7 @@ static void cf_check core2_vpmu_destroy(struct vcpu *v)
>      vpmu->context = NULL;
>      xfree(vpmu->priv_context);
>      vpmu->priv_context = NULL;
> -    if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
> +    if ( is_hvm_vcpu(v) && cpu_has_vmx && cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v);
>      release_pmu_ownership(PMU_OWNER_HVM);
>      vpmu_clear(vpmu);
> -- 
> 2.25.1
>
Jan Beulich May 16, 2024, 7:25 a.m. UTC | #2
On 16.05.2024 02:44, Stefano Stabellini wrote:
> On Wed, 15 May 2024, Sergiy Kibrik wrote:
>> If VMX/SVM is disabled in the build, we may still want to have vPMU drivers for
>> PV guests. Yet some calls to vmx/svm-related routines then need to be guarded.
>>
>> Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
> 
> Question to the x86 maintainers: are we sure we want to support the case
> where VMX/SVM is disabled in the build but still we want to run PV
> guests with vPMU?
> 
> If the answer is no, could we simplify this by making vpmu_amd
> dependent on CONFIG_SVM and vpmu_intel dependent on CONFIG_VMX?
> 
> I realize that it is possible and technically correct to disable
> CONFIG_SVM (or VMX) to run on AMD hardware (or Intel) with plain PV
> guests only. But do we want to support it? I wonder if we could make
> things easier by not supporting this configuration until somebody
> asks for it.

I think we want to allow for such a configuration; whether that's deemed
a supported one is an orthogonal question. Much like you can set PV=n and
HVM=n at the same time, yielding a largely useless hypervisor (where
perhaps even the question of whether it's supported may raise eyebrows).

Jan
Jan Beulich May 16, 2024, 11:23 a.m. UTC | #3
On 15.05.2024 11:14, Sergiy Kibrik wrote:
> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -290,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
>      context_save(v);
>  
>      if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> -         is_msr_bitmap_on(vpmu) )
> +         is_msr_bitmap_on(vpmu) && cpu_has_svm )
>          amd_vpmu_unset_msr_bitmap(v);

Assuming the change in the earlier patch can actually be established to be
safe, along the lines of an earlier comment from Stefano the addition may
want to move earlier in the overall conditionals (here and below). In fact
I wonder whether it wouldn't be neater to have

#define is_svm_vcpu(v) (cpu_has_svm && is_hvm_vcpu(v))

at the top of the file, and then use that throughout to replace is_hvm_vcpu().
Same on the Intel side then, obviously.
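For illustration only, the AMD side would then end up looking roughly like the
sketch below (untested; the helper name is the one suggested above, the call
site is the one from amd_vpmu_destroy() in this patch):

    #define is_svm_vcpu(v) ( cpu_has_svm && is_hvm_vcpu(v) )

    /* e.g. the check in amd_vpmu_destroy() would become: */
    if ( is_svm_vcpu(v) && is_msr_bitmap_on(vpmu) )
        amd_vpmu_unset_msr_bitmap(v);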

> @@ -288,7 +288,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
>  
>      /* Unset PMU MSR bitmap to trap lazy load. */
>      if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> -         cpu_has_vmx_msr_bitmap )
> +         cpu_has_vmx && cpu_has_vmx_msr_bitmap )

Aren't you elsewhere adding IS_ENABLED() to cpu_has_vmx_*, rendering this (and
similar changes further down) redundant?
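(For context: the idea is that after those earlier changes the secondary
feature tests already fold in the Kconfig check, roughly as in the sketch
below, so an extra cpu_has_vmx in the condition adds nothing. The exact
definitions in the tree may differ.)

    /* hypothetical shape after the earlier IS_ENABLED() patches */
    #define cpu_has_vmx_msr_bitmap \
        ( IS_ENABLED(CONFIG_VMX) && \
          (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP) )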

Jan
Sergiy Kibrik June 3, 2024, 9:02 a.m. UTC | #4
16.05.24 14:23, Jan Beulich:
> On 15.05.2024 11:14, Sergiy Kibrik wrote:
>> --- a/xen/arch/x86/cpu/vpmu_amd.c
>> +++ b/xen/arch/x86/cpu/vpmu_amd.c
>> @@ -290,7 +290,7 @@ static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
>>       context_save(v);
>>   
>>       if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
>> -         is_msr_bitmap_on(vpmu) )
>> +         is_msr_bitmap_on(vpmu) && cpu_has_svm )
>>           amd_vpmu_unset_msr_bitmap(v);
> 
> Assuming the change in the earlier patch can actually be established to be
> safe, along the lines of an earlier comment from Stefano the addition may
> want to move earlier in the overall conditionals (here and below). In fact
> I wonder whether it wouldn't be neater to have
> 
> #define is_svm_vcpu(v) (cpu_has_svm && is_hvm_vcpu(v))
> 
> at the top of the file, and then use that throughout to replace is_hvm_vcpu().
> Same on the Intel side then, obviously.
> 

sure, will do

>> @@ -288,7 +288,7 @@ static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
>>   
>>       /* Unset PMU MSR bitmap to trap lazy load. */
>>       if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
>> -         cpu_has_vmx_msr_bitmap )
>> +         cpu_has_vmx && cpu_has_vmx_msr_bitmap )
> 
> Aren't you elsewhere adding IS_ENABLED() to cpu_has_vmx_*, rendering this (and
> similar changes further down) redundant?
> 

Indeed, I can move this change later in the series & reuse the checks
provided by cpu_has_vmx_msr_bitmap.
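
In other words, once cpu_has_vmx_msr_bitmap itself carries the
IS_ENABLED(CONFIG_VMX) check, the Intel-side conditions can stay close to
their original shape. With an is_vmx_vcpu() counterpart to the helper
suggested above, a possible (untested) v3 form of the core2_vpmu_save()
check would be:

    #define is_vmx_vcpu(v) ( cpu_has_vmx && is_hvm_vcpu(v) )

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_vmx_vcpu(v) &&
         cpu_has_vmx_msr_bitmap )
        core2_vpmu_unset_msr_bitmap(v);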

   -Sergiy

Patch

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index db2fa420e1..40b0c8932f 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -290,7 +290,7 @@  static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
     context_save(v);
 
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
-         is_msr_bitmap_on(vpmu) )
+         is_msr_bitmap_on(vpmu) && cpu_has_svm )
         amd_vpmu_unset_msr_bitmap(v);
 
     if ( to_guest )
@@ -363,7 +363,7 @@  static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             return 0;
         vpmu_set(vpmu, VPMU_RUNNING);
 
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
              amd_vpmu_set_msr_bitmap(v);
     }
 
@@ -372,7 +372,7 @@  static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
     {
         vpmu_reset(vpmu, VPMU_RUNNING);
-        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
              amd_vpmu_unset_msr_bitmap(v);
         release_pmu_ownership(PMU_OWNER_HVM);
     }
@@ -415,7 +415,7 @@  static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
-    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
+    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) && cpu_has_svm )
         amd_vpmu_unset_msr_bitmap(v);
 
     xfree(vpmu->context);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index cd414165df..10c34a5691 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -269,7 +269,7 @@  static inline void __core2_vpmu_save(struct vcpu *v)
     if ( !is_hvm_vcpu(v) )
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
     /* Save MSR to private context to make it fork-friendly */
-    else if ( mem_sharing_enabled(v->domain) )
+    else if ( mem_sharing_enabled(v->domain) && cpu_has_vmx )
         vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                            &core2_vpmu_cxt->global_ctrl);
 }
@@ -288,7 +288,7 @@  static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
 
     /* Unset PMU MSR bitmap to trap lazy load. */
     if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
-         cpu_has_vmx_msr_bitmap )
+         cpu_has_vmx && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v);
 
     if ( to_guest )
@@ -333,7 +333,7 @@  static inline void __core2_vpmu_load(struct vcpu *v)
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
     }
     /* Restore MSR from context when used with a fork */
-    else if ( mem_sharing_is_fork(v->domain) )
+    else if ( mem_sharing_is_fork(v->domain) && cpu_has_vmx )
         vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                             core2_vpmu_cxt->global_ctrl);
 }
@@ -442,7 +442,7 @@  static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
     if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
         return 0;
 
-    if ( is_hvm_vcpu(v) )
+    if ( is_hvm_vcpu(v) && cpu_has_vmx )
     {
         if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, 0) )
             goto out_err;
@@ -513,7 +513,7 @@  static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
         __core2_vpmu_load(current);
         vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 
-        if ( is_hvm_vcpu(current) && cpu_has_vmx_msr_bitmap )
+        if ( is_hvm_vcpu(current) && cpu_has_vmx && cpu_has_vmx_msr_bitmap )
             core2_vpmu_set_msr_bitmap(current);
     }
     return 1;
@@ -584,7 +584,7 @@  static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         if ( msr_content & fixed_ctrl_mask )
             return -EINVAL;
 
-        if ( is_hvm_vcpu(v) )
+        if ( is_hvm_vcpu(v) && cpu_has_vmx )
             vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                &core2_vpmu_cxt->global_ctrl);
         else
@@ -653,7 +653,7 @@  static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
             if ( blocked )
                 return -EINVAL;
 
-            if ( is_hvm_vcpu(v) )
+            if ( is_hvm_vcpu(v) && cpu_has_vmx )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
                                    &core2_vpmu_cxt->global_ctrl);
             else
@@ -672,7 +672,7 @@  static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         wrmsrl(msr, msr_content);
     else
     {
-        if ( is_hvm_vcpu(v) )
+        if ( is_hvm_vcpu(v) && cpu_has_vmx )
             vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
         else
             wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
@@ -706,7 +706,7 @@  static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
             *msr_content = core2_vpmu_cxt->global_status;
             break;
         case MSR_CORE_PERF_GLOBAL_CTRL:
-            if ( is_hvm_vcpu(v) )
+            if ( is_hvm_vcpu(v) && cpu_has_vmx )
                 vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
             else
                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
@@ -808,7 +808,7 @@  static void cf_check core2_vpmu_destroy(struct vcpu *v)
     vpmu->context = NULL;
     xfree(vpmu->priv_context);
     vpmu->priv_context = NULL;
-    if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
+    if ( is_hvm_vcpu(v) && cpu_has_vmx && cpu_has_vmx_msr_bitmap )
         core2_vpmu_unset_msr_bitmap(v);
     release_pmu_ownership(PMU_OWNER_HVM);
     vpmu_clear(vpmu);