
[RFC,1/2] nSVM: introduce struct vmcb_ctrl_area_cached

Message ID 20210917124956.2042052-2-eesposit@redhat.com (mailing list archive)
State New, archived
Series KVM: nSVM: use vmcb_ctrl_area_cached instead

Commit Message

Emanuele Giuseppe Esposito Sept. 17, 2021, 12:49 p.m. UTC
This structure will replace vmcb_control_area in
svm_nested_state, providing only the fields that are actually
used by the nested state. This avoids keeping and copying around
uninitialized fields. The cost, however, is that the existing
accessors (in this case vmcb_is_intercept) expect the old
structure, so they need to be duplicated.

Also introduce copy_vmcb_ctrl_area_cached(), which copies the
relevant fields of a vmcb_control_area into a
vmcb_ctrl_area_cached. This will be used in the next patch.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 32 ++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.h    | 31 +++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

Comments

Paolo Bonzini Sept. 28, 2021, 4:14 p.m. UTC | #1
On 17/09/21 14:49, Emanuele Giuseppe Esposito wrote:
> +static inline bool vmcb_is_intercept_cached(struct vmcb_ctrl_area_cached *control, u32 bit)
> +{
> +	return vmcb_is_intercept((struct vmcb_control_area *) control,
> +				 bit);
> +}
> +

This is quite dangerous, because it expects the field offsets to be the
same in vmcb_control_area and vmcb_ctrl_area_cached.  You can just
duplicate the implementation (which is essentially just a test_bit) and
name the function

static inline bool vmcb12_is_intercept(struct kvm_vcpu *vcpu, u32 bit)
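
Purely for illustration, a minimal sketch of such a helper could look
like the one below.  It is hypothetical: it assumes svm->nested.ctl has
already been converted to struct vmcb_ctrl_area_cached (the subject of
the next patch), and the final name and placement are of course up to
the author.

static inline bool vmcb12_is_intercept(struct kvm_vcpu *vcpu, u32 bit)
{
	/*
	 * Duplicate the test_bit() logic on the cached structure instead
	 * of casting between two unrelated structure types.
	 */
	struct vcpu_svm *svm = to_svm(vcpu);

	return test_bit(bit, (unsigned long *)&svm->nested.ctl.intercepts);
}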

Likewise, nested_vmcb_check_controls can just take the vcpu since you 
moved nested_load_control_from_vmcb12 earlier.
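
Again, purely as a sketch (the two checks shown are only examples of
the existing ones; the rest of the body would stay the same), the
signature change could look like:

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vmcb_ctrl_area_cached *ctl = &to_svm(vcpu)->nested.ctl;

	if (!vmcb12_is_intercept(vcpu, INTERCEPT_VMRUN))
		return false;

	if (ctl->asid == 0)
		return false;

	/* ... remaining checks unchanged ... */
	return true;
}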

Finally, copy_vmcb_control_area can be inlined, and its caller 
nested_load_control_from_vmcb12 can stop copying the ASID.  There is 
only one call to it since commit 4995a3685f1b ("KVM: SVM: Use a separate 
vmcb for the nested L2 guest", 2021-03-15).
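
To illustrate that last point, nested_load_control_from_vmcb12 could
end up roughly as follows; the body here is only a sketch reconstructed
from the current code, not the actual patch.

static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
					    struct vmcb_control_area *control)
{
	/*
	 * The cached copy includes the ASID, so the explicit
	 * "svm->nested.ctl.asid = control->asid" assignment goes away.
	 */
	copy_vmcb_ctrl_area_cached(control, &svm->nested.ctl);

	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}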

Thanks,

Paolo

Patch

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9b2c4895d5d9..d06a95156535 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1224,6 +1224,38 @@  int nested_svm_exit_special(struct vcpu_svm *svm)
 	return NESTED_EXIT_CONTINUE;
 }
 
+/* Inverse operation of copy_vmcb_control_area(). asid is copied too.  */
+static void copy_vmcb_ctrl_area_cached(struct vmcb_control_area *from,
+				       struct vmcb_ctrl_area_cached *dst)
+{
+	unsigned int i;
+
+	for (i = 0; i < MAX_INTERCEPT; i++)
+		dst->intercepts[i] = from->intercepts[i];
+
+	dst->iopm_base_pa         = from->iopm_base_pa;
+	dst->msrpm_base_pa        = from->msrpm_base_pa;
+	dst->tsc_offset           = from->tsc_offset;
+	dst->asid                 = from->asid;
+	dst->tlb_ctl              = from->tlb_ctl;
+	dst->int_ctl              = from->int_ctl;
+	dst->int_vector           = from->int_vector;
+	dst->int_state            = from->int_state;
+	dst->exit_code            = from->exit_code;
+	dst->exit_code_hi         = from->exit_code_hi;
+	dst->exit_info_1          = from->exit_info_1;
+	dst->exit_info_2          = from->exit_info_2;
+	dst->exit_int_info        = from->exit_int_info;
+	dst->exit_int_info_err    = from->exit_int_info_err;
+	dst->nested_ctl           = from->nested_ctl;
+	dst->event_inj            = from->event_inj;
+	dst->event_inj_err        = from->event_inj_err;
+	dst->nested_cr3           = from->nested_cr3;
+	dst->virt_ext             = from->virt_ext;
+	dst->pause_filter_count   = from->pause_filter_count;
+	dst->pause_filter_thresh  = from->pause_filter_thresh;
+}
+
 static int svm_get_nested_state(struct kvm_vcpu *vcpu,
 				struct kvm_nested_state __user *user_kvm_nested_state,
 				u32 user_data_size)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 9982e6136724..a00be2516cc6 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -112,6 +112,31 @@  struct vmcb_save_area_cached {
 	u64 dr6;
 };
 
+struct vmcb_ctrl_area_cached {
+	u32 intercepts[MAX_INTERCEPT];
+	u16 pause_filter_thresh;
+	u16 pause_filter_count;
+	u64 iopm_base_pa;
+	u64 msrpm_base_pa;
+	u64 tsc_offset;
+	u32 asid;
+	u8 tlb_ctl;
+	u32 int_ctl;
+	u32 int_vector;
+	u32 int_state;
+	u32 exit_code;
+	u32 exit_code_hi;
+	u64 exit_info_1;
+	u64 exit_info_2;
+	u32 exit_int_info;
+	u32 exit_int_info_err;
+	u64 nested_ctl;
+	u32 event_inj;
+	u32 event_inj_err;
+	u64 nested_cr3;
+	u64 virt_ext;
+};
+
 struct svm_nested_state {
 	struct kvm_vmcb_info vmcb02;
 	u64 hsave_msr;
@@ -304,6 +329,12 @@  static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
 	return test_bit(bit, (unsigned long *)&control->intercepts);
 }
 
+static inline bool vmcb_is_intercept_cached(struct vmcb_ctrl_area_cached *control, u32 bit)
+{
+	return vmcb_is_intercept((struct vmcb_control_area *) control,
+				 bit);
+}
+
 static inline void set_dr_intercepts(struct vcpu_svm *svm)
 {
 	struct vmcb *vmcb = svm->vmcb01.ptr;