@@ -613,6 +613,27 @@ static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
msr_bitmap_l0, msr);
}

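+/*
+ * Helpers to merge L0's and L1's intercepts for a given MSR.  Note that
+ * these macros deliberately rely on the local variables 'vmx',
+ * 'msr_bitmap_l1' and 'msr_bitmap_l0' being in scope at the call site,
+ * e.g. nested_vmx_merge_msr_bitmaps_rw(MSR_FS_BASE) expands to
+ * nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ * MSR_FS_BASE, MSR_TYPE_RW).
+ */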
+#define nested_vmx_merge_msr_bitmaps(msr, type) \
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, \
+ msr_bitmap_l0, msr, type)
+
+#define nested_vmx_merge_msr_bitmaps_read(msr) \
+ nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_R)
+
+#define nested_vmx_merge_msr_bitmaps_write(msr) \
+ nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_W)
+
+#define nested_vmx_merge_msr_bitmaps_rw(msr) \
+ nested_vmx_merge_msr_bitmaps(msr, MSR_TYPE_RW)
+
/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
@@ -696,23 +717,18 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
* other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
*/
#ifdef CONFIG_X86_64
- nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
- MSR_FS_BASE, MSR_TYPE_RW);
-
- nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
- MSR_GS_BASE, MSR_TYPE_RW);
-
- nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
- MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
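+ /* The FS/GS/KERNEL_GS base MSRs are passed through only on 64-bit hosts. */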
+ nested_vmx_merge_msr_bitmaps_rw(MSR_FS_BASE);
+ nested_vmx_merge_msr_bitmaps_rw(MSR_GS_BASE);
+ nested_vmx_merge_msr_bitmaps_rw(MSR_KERNEL_GS_BASE);
#endif
- nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
-
- nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_PRED_CMD, MSR_TYPE_W);
-
- nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_FLUSH_CMD, MSR_TYPE_W);
+ nested_vmx_merge_msr_bitmaps_rw(MSR_IA32_SPEC_CTRL);
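+ /*
+  * IA32_PRED_CMD and IA32_FLUSH_CMD are write-only command MSRs, i.e.
+  * only the write intercepts need to be merged.
+  */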
+ nested_vmx_merge_msr_bitmaps_write(MSR_IA32_PRED_CMD);
+ nested_vmx_merge_msr_bitmaps_write(MSR_IA32_FLUSH_CMD);

kvm_vcpu_unmap(vcpu, &map);