
[3/4] x86/vmx: replace enum vmx_msr_intercept_type with the msr access flags

Message ID 20230227075652.3782973-4-burzalodowa@gmail.com
State New, archived
Series hvm: add hvm_funcs hooks for msr intercept handling

Commit Message

Xenia Ragiadakou Feb. 27, 2023, 7:56 a.m. UTC
Replace enum vmx_msr_intercept_type with the MSR access flags defined in
hvm.h, so that the functions {svm,vmx}_{set,clear}_msr_intercept()
share the same prototype.

No functional change intended.

Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---
 xen/arch/x86/cpu/vpmu_intel.c           | 24 +++++++-------
 xen/arch/x86/hvm/vmx/vmcs.c             | 38 ++++++++++-----------
 xen/arch/x86/hvm/vmx/vmx.c              | 44 ++++++++++++-------------
 xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 14 ++------
 4 files changed, 54 insertions(+), 66 deletions(-)
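
[Editorial note: the MSR access flags the commit message refers to are
introduced by an earlier patch in this series and are not visible in this
diff. A minimal sketch of what hvm.h presumably provides; the names are
taken from the diff below, the exact values are an assumption:

    /* Sketch only: flag values are assumed, not taken from the series. */
    #define MSR_R  (1u << 0)        /* read access */
    #define MSR_W  (1u << 1)        /* write access */
    #define MSR_RW (MSR_R | MSR_W)  /* read and write access */
]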

Comments

Jan Beulich Feb. 28, 2023, 2:31 p.m. UTC | #1
On 27.02.2023 08:56, Xenia Ragiadakou wrote:
> --- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
> @@ -644,18 +644,8 @@ static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
>      return 0;
>  }
>  
> -
> -/* MSR intercept bitmap infrastructure. */
> -enum vmx_msr_intercept_type {
> -    VMX_MSR_R  = 1,
> -    VMX_MSR_W  = 2,
> -    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
> -};
> -
> -void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
> -                             enum vmx_msr_intercept_type type);
> -void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
> -                           enum vmx_msr_intercept_type type);
> +void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
> +void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);

unsigned int please again for the last parameter each.

Jan
Jan Beulich Feb. 28, 2023, 2:34 p.m. UTC | #2
On 28.02.2023 15:31, Jan Beulich wrote:
> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>> --- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
>> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
>> @@ -644,18 +644,8 @@ static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
>>      return 0;
>>  }
>>  
>> -
>> -/* MSR intercept bitmap infrastructure. */
>> -enum vmx_msr_intercept_type {
>> -    VMX_MSR_R  = 1,
>> -    VMX_MSR_W  = 2,
>> -    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
>> -};
>> -
>> -void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
>> -                             enum vmx_msr_intercept_type type);
>> -void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
>> -                           enum vmx_msr_intercept_type type);
>> +void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
>> +void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);
> 
> unsigned int please again for the last parameter each.

Oh, also, another remark here towards patch 2: Note how the middle parameter
each is "unsigned int msr" here, when in SVM code you make it (kind of leave
it) uint32_t. As per ./CODING_STYLE unsigned int is to be preferred; in any
event both (and the eventual hook) want to agree.

Jan
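
[Editorial note: taken together, the two remarks above imply declarations
along these lines. This is a hypothetical v2 sketch, not code from the
series:

    /* Sketch: "type" changed to unsigned int per comment #1; "msr" kept
     * as unsigned int on both the VMX and SVM sides (and in the eventual
     * hvm_funcs hook) per comment #2. */
    void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
                               unsigned int type);
    void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                                 unsigned int type);
    void svm_set_msr_intercept(struct vcpu *v, unsigned int msr,
                               unsigned int type);
    void svm_clear_msr_intercept(struct vcpu *v, unsigned int msr,
                                 unsigned int type);
]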
Xenia Ragiadakou Feb. 28, 2023, 3:07 p.m. UTC | #3
On 2/28/23 16:34, Jan Beulich wrote:
> On 28.02.2023 15:31, Jan Beulich wrote:
>> On 27.02.2023 08:56, Xenia Ragiadakou wrote:
>>> --- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
>>> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
>>> @@ -644,18 +644,8 @@ static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
>>>       return 0;
>>>   }
>>>   
>>> -
>>> -/* MSR intercept bitmap infrastructure. */
>>> -enum vmx_msr_intercept_type {
>>> -    VMX_MSR_R  = 1,
>>> -    VMX_MSR_W  = 2,
>>> -    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
>>> -};
>>> -
>>> -void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
>>> -                             enum vmx_msr_intercept_type type);
>>> -void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
>>> -                           enum vmx_msr_intercept_type type);
>>> +void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
>>> +void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);
>>
>> unsigned int please again for the last parameter each.
> 
> Oh, also, another remark here towards patch 2: Note how the middle parameter
> each is "unsigned int msr" here, when in SVM code you make it (kind of leave
> it) uint32_t. As per ./CODING_STYLE unsigned int is to be preferred; in any
> event both (and the eventual hook) want to agree.

Thx. I will fix and keep it in mind.

> 
> Jan

Patch

diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index bcfa187a14..bd91c79a36 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -230,22 +230,22 @@  static void core2_vpmu_set_msr_bitmap(struct vcpu *v)
 
     /* Allow Read/Write PMU Counters MSR Directly. */
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, VMX_MSR_RW);
+            vmx_clear_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     /* Allow Read PMU Non-global Controls Directly. */
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), VMX_MSR_R);
+        vmx_clear_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
-    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+    vmx_clear_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    vmx_clear_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
@@ -253,21 +253,21 @@  static void core2_vpmu_unset_msr_bitmap(struct vcpu *v)
     unsigned int i;
 
     for ( i = 0; i < fixed_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR0 + i, MSR_RW);
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
     {
-        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_IA32_PERFCTR0 + i, MSR_RW);
 
         if ( full_width_write )
-            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, VMX_MSR_RW);
+            vmx_set_msr_intercept(v, MSR_IA32_A_PERFCTR0 + i, MSR_RW);
     }
 
     for ( i = 0; i < arch_pmc_cnt; i++ )
-        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), VMX_MSR_R);
+        vmx_set_msr_intercept(v, MSR_P6_EVNTSEL(i), MSR_R);
 
-    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, VMX_MSR_R);
-    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, VMX_MSR_R);
+    vmx_set_msr_intercept(v, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_R);
+    vmx_set_msr_intercept(v, MSR_IA32_DS_AREA, MSR_R);
 }
 
 static inline void __core2_vpmu_save(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index ed71ecfb62..22c12509d5 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -902,8 +902,7 @@  static void vmx_set_host_env(struct vcpu *v)
               (unsigned long)&get_cpu_info()->guest_cpu_user_regs.error_code);
 }
 
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
-                             enum vmx_msr_intercept_type type)
+void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
     struct domain *d = v->domain;
@@ -917,25 +916,24 @@  void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
 
     if ( msr <= 0x1fff )
     {
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             clear_bit(msr, msr_bitmap->read_low);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             clear_bit(msr, msr_bitmap->write_low);
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             clear_bit(msr, msr_bitmap->read_high);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             clear_bit(msr, msr_bitmap->write_high);
     }
     else
         ASSERT(!"MSR out of range for interception\n");
 }
 
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
-                           enum vmx_msr_intercept_type type)
+void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type)
 {
     struct vmx_msr_bitmap *msr_bitmap = v->arch.hvm.vmx.msr_bitmap;
 
@@ -945,17 +943,17 @@  void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
 
     if ( msr <= 0x1fff )
     {
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             set_bit(msr, msr_bitmap->read_low);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             set_bit(msr, msr_bitmap->write_low);
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        if ( type & VMX_MSR_R )
+        if ( type & MSR_R )
             set_bit(msr, msr_bitmap->read_high);
-        if ( type & VMX_MSR_W )
+        if ( type & MSR_W )
             set_bit(msr, msr_bitmap->write_high);
     }
     else
@@ -1162,17 +1160,17 @@  static int construct_vmcs(struct vcpu *v)
         v->arch.hvm.vmx.msr_bitmap = msr_bitmap;
         __vmwrite(MSR_BITMAP, virt_to_maddr(msr_bitmap));
 
-        vmx_clear_msr_intercept(v, MSR_FS_BASE, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_GS_BASE, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_SHADOW_GS_BASE, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, VMX_MSR_RW);
-        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_FS_BASE, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_GS_BASE, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_SHADOW_GS_BASE, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, MSR_RW);
         if ( paging_mode_hap(d) && (!is_iommu_enabled(d) || iommu_snoop) )
-            vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+            vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
         if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
              (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
-            vmx_clear_msr_intercept(v, MSR_IA32_BNDCFGS, VMX_MSR_RW);
+            vmx_clear_msr_intercept(v, MSR_IA32_BNDCFGS, MSR_RW);
     }
 
     /* I/O access bitmap. */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0ec33bcc18..87c47c002c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -802,7 +802,7 @@  static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
      */
     if ( cp->feat.ibrsb )
     {
-        vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
 
         rc = vmx_add_guest_msr(v, MSR_SPEC_CTRL, 0);
         if ( rc )
@@ -810,7 +810,7 @@  static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
     }
     else
     {
-        vmx_set_msr_intercept(v, MSR_SPEC_CTRL, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_SPEC_CTRL, MSR_RW);
 
         rc = vmx_del_msr(v, MSR_SPEC_CTRL, VMX_MSR_GUEST);
         if ( rc && rc != -ESRCH )
@@ -820,20 +820,20 @@  static void cf_check vmx_cpuid_policy_changed(struct vcpu *v)
 
     /* MSR_PRED_CMD is safe to pass through if the guest knows about it. */
     if ( cp->feat.ibrsb || cp->extd.ibpb )
-        vmx_clear_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
     else
-        vmx_set_msr_intercept(v, MSR_PRED_CMD,  VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_PRED_CMD, MSR_RW);
 
     /* MSR_FLUSH_CMD is safe to pass through if the guest knows about it. */
     if ( cp->feat.l1d_flush )
-        vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_FLUSH_CMD, MSR_RW);
     else
-        vmx_set_msr_intercept(v, MSR_FLUSH_CMD, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_FLUSH_CMD, MSR_RW);
 
     if ( cp->feat.pks )
-        vmx_clear_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+        vmx_clear_msr_intercept(v, MSR_PKRS, MSR_RW);
     else
-        vmx_set_msr_intercept(v, MSR_PKRS, VMX_MSR_RW);
+        vmx_set_msr_intercept(v, MSR_PKRS, MSR_RW);
 
  out:
     vmx_vmcs_exit(v);
@@ -1429,7 +1429,7 @@  static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
 
             vmx_get_guest_pat(v, pat);
             vmx_set_guest_pat(v, uc_pat);
-            vmx_set_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+            vmx_set_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
 
             wbinvd();               /* flush possibly polluted cache */
             hvm_asid_flush_vcpu(v); /* invalidate memory type cached in TLB */
@@ -1440,7 +1440,7 @@  static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
             v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
             vmx_set_guest_pat(v, *pat);
             if ( !is_iommu_enabled(v->domain) || iommu_snoop )
-                vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
+                vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, MSR_RW);
             hvm_asid_flush_vcpu(v); /* no need to flush cache */
         }
     }
@@ -1906,9 +1906,9 @@  static void cf_check vmx_update_guest_efer(struct vcpu *v)
      * into hardware, clear the read intercept to avoid unnecessary VMExits.
      */
     if ( guest_efer == v->arch.hvm.guest_efer )
-        vmx_clear_msr_intercept(v, MSR_EFER, VMX_MSR_R);
+        vmx_clear_msr_intercept(v, MSR_EFER, MSR_R);
     else
-        vmx_set_msr_intercept(v, MSR_EFER, VMX_MSR_R);
+        vmx_set_msr_intercept(v, MSR_EFER, MSR_R);
 }
 
 void nvmx_enqueue_n2_exceptions(struct vcpu *v, 
@@ -2335,7 +2335,7 @@  static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
     struct vcpu *v;
 
     for_each_vcpu ( d, v )
-        vmx_set_msr_intercept(v, msr, VMX_MSR_W);
+        vmx_set_msr_intercept(v, msr, MSR_W);
 }
 
 static void cf_check vmx_vcpu_update_eptp(struct vcpu *v)
@@ -3502,17 +3502,17 @@  void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
             {
                 for ( msr = MSR_X2APIC_FIRST;
                       msr <= MSR_X2APIC_LAST; msr++ )
-                    vmx_clear_msr_intercept(v, msr, VMX_MSR_R);
+                    vmx_clear_msr_intercept(v, msr, MSR_R);
 
-                vmx_set_msr_intercept(v, MSR_X2APIC_PPR, VMX_MSR_R);
-                vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, VMX_MSR_R);
-                vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, VMX_MSR_R);
+                vmx_set_msr_intercept(v, MSR_X2APIC_PPR, MSR_R);
+                vmx_set_msr_intercept(v, MSR_X2APIC_TMICT, MSR_R);
+                vmx_set_msr_intercept(v, MSR_X2APIC_TMCCT, MSR_R);
             }
             if ( cpu_has_vmx_virtual_intr_delivery )
             {
-                vmx_clear_msr_intercept(v, MSR_X2APIC_TPR, VMX_MSR_W);
-                vmx_clear_msr_intercept(v, MSR_X2APIC_EOI, VMX_MSR_W);
-                vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, VMX_MSR_W);
+                vmx_clear_msr_intercept(v, MSR_X2APIC_TPR, MSR_W);
+                vmx_clear_msr_intercept(v, MSR_X2APIC_EOI, MSR_W);
+                vmx_clear_msr_intercept(v, MSR_X2APIC_SELF, MSR_W);
             }
         }
         else
@@ -3523,7 +3523,7 @@  void cf_check vmx_vlapic_msr_changed(struct vcpu *v)
            SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE) )
         for ( msr = MSR_X2APIC_FIRST;
               msr <= MSR_X2APIC_LAST; msr++ )
-            vmx_set_msr_intercept(v, msr, VMX_MSR_RW);
+            vmx_set_msr_intercept(v, msr, MSR_RW);
 
     vmx_update_secondary_exec_control(v);
     vmx_vmcs_exit(v);
@@ -3659,7 +3659,7 @@  static int cf_check vmx_msr_write_intercept(
                         return X86EMUL_OKAY;
                     }
 
-                    vmx_clear_msr_intercept(v, lbr->base + i, VMX_MSR_RW);
+                    vmx_clear_msr_intercept(v, lbr->base + i, MSR_RW);
                 }
             }
 
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 0a84e74478..e08c506be5 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -644,18 +644,8 @@  static inline int vmx_write_guest_msr(struct vcpu *v, uint32_t msr,
     return 0;
 }
 
-
-/* MSR intercept bitmap infrastructure. */
-enum vmx_msr_intercept_type {
-    VMX_MSR_R  = 1,
-    VMX_MSR_W  = 2,
-    VMX_MSR_RW = VMX_MSR_R | VMX_MSR_W,
-};
-
-void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr,
-                             enum vmx_msr_intercept_type type);
-void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr,
-                           enum vmx_msr_intercept_type type);
+void vmx_clear_msr_intercept(struct vcpu *v, unsigned int msr, int type);
+void vmx_set_msr_intercept(struct vcpu *v, unsigned int msr, int type);
 void vmx_vmcs_switch(paddr_t from, paddr_t to);
 void vmx_set_eoi_exit_bitmap(struct vcpu *v, u8 vector);
 void vmx_clear_eoi_exit_bitmap(struct vcpu *v, u8 vector);
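
[Editorial note: the range checks in vmx_{set,clear}_msr_intercept() above
mirror the VMX MSR bitmap layout: one 4K page split into four 1K regions,
covering reads and writes for the low (0x00000000-0x00001fff) and high
(0xc0000000-0xc0001fff) MSR ranges, as described in the Intel SDM. A rough
sketch of the structure the code indexes into; field names follow the diff,
and the exact Xen definition may differ:

    /* Sketch of the 4K VMX MSR bitmap: 0x2000 bits (1K) per region.
     * BITS_PER_LONG is Xen's bits-per-long macro. */
    struct vmx_msr_bitmap {
        unsigned long read_low  [0x2000 / BITS_PER_LONG]; /* reads,  low MSRs  */
        unsigned long read_high [0x2000 / BITS_PER_LONG]; /* reads,  high MSRs */
        unsigned long write_low [0x2000 / BITS_PER_LONG]; /* writes, low MSRs  */
        unsigned long write_high[0x2000 / BITS_PER_LONG]; /* writes, high MSRs */
    };

For a high MSR, the code masks with 0x1fff before calling set_bit()/
clear_bit(), which is why both halves of each range check appear in the
functions above.]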