kvm: nVMX: Add support for "VMWRITE to any supported field"

Message ID 20180502175730.102024-1-jmattson@google.com
State New, archived

Commit Message

Jim Mattson May 2, 2018, 5:57 p.m. UTC
Allow VMWRITE in L1 to modify VM-exit information fields and report
this feature in L1's IA32_VMX_MISC MSR.

Note that this feature is a prerequisite for kvm in L1 to use VMCS
shadowing, once that feature is available.
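
As a rough illustration (not part of this patch), an L1 hypervisor would discover
this capability by testing bit 29 of IA32_VMX_MISC. The rdmsr() helper below is a
placeholder for whatever MSR-read primitive the guest environment provides:

#include <stdbool.h>
#include <stdint.h>

#define MSR_IA32_VMX_MISC			0x485
#define VMX_MISC_VMWRITE_SHADOW_RO_FIELDS	(1ULL << 29)

/* Placeholder: assume the guest provides an MSR read primitive. */
extern uint64_t rdmsr(uint32_t msr);

/* True when VMWRITE may target VM-exit information (read-only) fields. */
static bool vmwrite_any_field_supported(void)
{
	return (rdmsr(MSR_IA32_VMX_MISC) &
		VMX_MISC_VMWRITE_SHADOW_RO_FIELDS) != 0;
}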

Signed-off-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/vmx.c               | 87 ++++++++----------------------
 arch/x86/kvm/vmx_shadow_fields.h | 90 +++++++++++++++-----------------
 2 files changed, 64 insertions(+), 113 deletions(-)

Patch

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c7668806163f..295bb29bf1b6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -720,20 +720,11 @@  static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
 	FIELD(number, name),						\
 	[ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
 
-
-static u16 shadow_read_only_fields[] = {
-#define SHADOW_FIELD_RO(x) x,
-#include "vmx_shadow_fields.h"
-};
-static int max_shadow_read_only_fields =
-	ARRAY_SIZE(shadow_read_only_fields);
-
-static u16 shadow_read_write_fields[] = {
-#define SHADOW_FIELD_RW(x) x,
+static u16 shadow_fields[] = {
+#define SHADOW_FIELD(x) x,
 #include "vmx_shadow_fields.h"
 };
-static int max_shadow_read_write_fields =
-	ARRAY_SIZE(shadow_read_write_fields);
+static int max_shadow_fields = ARRAY_SIZE(shadow_fields);
 
 static const unsigned short vmcs_field_to_offset_table[] = {
 	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
@@ -3121,6 +3112,7 @@  static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
 		msrs->misc_high);
 	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
 	msrs->misc_low |=
+		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
 		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
 		VMX_MISC_ACTIVITY_HLT;
 	msrs->misc_high = 0;
@@ -3274,6 +3266,9 @@  static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
 	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
 				   vmx->nested.msrs.misc_high);
 
+	if (!(data & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
+		return -EINVAL;
+
 	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
 		return -EINVAL;
 
@@ -4254,31 +4249,12 @@  static void init_vmcs_shadow_fields(void)
 {
 	int i, j;
 
-	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
-		u16 field = shadow_read_only_fields[i];
-		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
-		    (i + 1 == max_shadow_read_only_fields ||
-		     shadow_read_only_fields[i + 1] != field + 1))
-			pr_err("Missing field from shadow_read_only_field %x\n",
-			       field + 1);
-
-		clear_bit(field, vmx_vmread_bitmap);
-#ifdef CONFIG_X86_64
-		if (field & 1)
-			continue;
-#endif
-		if (j < i)
-			shadow_read_only_fields[j] = field;
-		j++;
-	}
-	max_shadow_read_only_fields = j;
-
-	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
-		u16 field = shadow_read_write_fields[i];
+	for (i = j = 0; i < max_shadow_fields; i++) {
+		u16 field = shadow_fields[i];
 		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
-		    (i + 1 == max_shadow_read_write_fields ||
-		     shadow_read_write_fields[i + 1] != field + 1))
-			pr_err("Missing field from shadow_read_write_field %x\n",
+		    (i + 1 == max_shadow_fields ||
+		     shadow_fields[i + 1] != field + 1))
+			pr_err("Missing field from shadow_fields %x\n",
 			       field + 1);
 
 		/*
@@ -4310,10 +4286,10 @@  static void init_vmcs_shadow_fields(void)
 			continue;
 #endif
 		if (j < i)
-			shadow_read_write_fields[j] = field;
+			shadow_fields[j] = field;
 		j++;
 	}
-	max_shadow_read_write_fields = j;
+	max_shadow_fields = j;
 }
 
 static __init int alloc_kvm_area(void)
@@ -7929,14 +7905,13 @@  static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	unsigned long field;
 	u64 field_value;
 	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
-	const u16 *fields = shadow_read_write_fields;
-	const int num_fields = max_shadow_read_write_fields;
+	const u16 *fields = shadow_fields;
 
 	preempt_disable();
 
 	vmcs_load(shadow_vmcs);
 
-	for (i = 0; i < num_fields; i++) {
+	for (i = 0; i < max_shadow_fields; i++) {
 		field = fields[i];
 		field_value = __vmcs_readl(field);
 		vmcs12_write_any(&vmx->vcpu, field, field_value);
@@ -7950,27 +7925,18 @@  static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 {
-	const u16 *fields[] = {
-		shadow_read_write_fields,
-		shadow_read_only_fields
-	};
-	const int max_fields[] = {
-		max_shadow_read_write_fields,
-		max_shadow_read_only_fields
-	};
-	int i, q;
+	int i;
 	unsigned long field;
 	u64 field_value = 0;
 	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+	const u16 *fields = shadow_fields;
 
 	vmcs_load(shadow_vmcs);
 
-	for (q = 0; q < ARRAY_SIZE(fields); q++) {
-		for (i = 0; i < max_fields[q]; i++) {
-			field = fields[q][i];
-			vmcs12_read_any(&vmx->vcpu, field, &field_value);
-			__vmcs_writel(field, field_value);
-		}
+	for (i = 0; i < max_shadow_fields; i++) {
+		field = fields[i];
+		vmcs12_read_any(&vmx->vcpu, field, &field_value);
+		__vmcs_writel(field, field_value);
 	}
 
 	vmcs_clear(shadow_vmcs);
@@ -8071,21 +8037,14 @@  static int handle_vmwrite(struct kvm_vcpu *vcpu)
 		}
 	}
 
-
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
-	if (vmcs_field_readonly(field)) {
-		nested_vmx_failValid(vcpu,
-			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
-		return kvm_skip_emulated_instruction(vcpu);
-	}
-
 	if (vmcs12_write_any(vcpu, field, field_value) < 0) {
 		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	switch (field) {
-#define SHADOW_FIELD_RW(x) case x:
+#define SHADOW_FIELD(x) case x:
 #include "vmx_shadow_fields.h"
 		/*
 		 * The fields that can be updated by L1 without a vmexit are
diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h
index cd0c75f6d037..146315d80181 100644
--- a/arch/x86/kvm/vmx_shadow_fields.h
+++ b/arch/x86/kvm/vmx_shadow_fields.h
@@ -1,10 +1,3 @@ 
-#ifndef SHADOW_FIELD_RO
-#define SHADOW_FIELD_RO(x)
-#endif
-#ifndef SHADOW_FIELD_RW
-#define SHADOW_FIELD_RW(x)
-#endif
-
 /*
  * We do NOT shadow fields that are modified when L0
  * traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -27,51 +20,50 @@ 
  * branch prediction in vmcs_read_any and vmcs_write_any.
  */
 
-/* 16-bits */
-SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
-SHADOW_FIELD_RW(GUEST_INTR_STATUS)
-SHADOW_FIELD_RW(GUEST_PML_INDEX)
-SHADOW_FIELD_RW(HOST_FS_SELECTOR)
-SHADOW_FIELD_RW(HOST_GS_SELECTOR)
+/* 16-bit */
+SHADOW_FIELD(GUEST_CS_SELECTOR)
+SHADOW_FIELD(GUEST_INTR_STATUS)
+SHADOW_FIELD(GUEST_PML_INDEX)
+SHADOW_FIELD(HOST_FS_SELECTOR)
+SHADOW_FIELD(HOST_GS_SELECTOR)
 
-/* 32-bits */
-SHADOW_FIELD_RO(VM_EXIT_REASON)
-SHADOW_FIELD_RO(VM_EXIT_INTR_INFO)
-SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN)
-SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD)
-SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE)
-SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE)
-SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL)
-SHADOW_FIELD_RW(EXCEPTION_BITMAP)
-SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
-SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
-SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
-SHADOW_FIELD_RW(TPR_THRESHOLD)
-SHADOW_FIELD_RW(GUEST_CS_LIMIT)
-SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
-SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
-SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
+/* 32-bit */
+SHADOW_FIELD(VM_EXIT_REASON)
+SHADOW_FIELD(VM_EXIT_INTR_INFO)
+SHADOW_FIELD(VM_EXIT_INSTRUCTION_LEN)
+SHADOW_FIELD(IDT_VECTORING_INFO_FIELD)
+SHADOW_FIELD(IDT_VECTORING_ERROR_CODE)
+SHADOW_FIELD(VM_EXIT_INTR_ERROR_CODE)
+SHADOW_FIELD(CPU_BASED_VM_EXEC_CONTROL)
+SHADOW_FIELD(EXCEPTION_BITMAP)
+SHADOW_FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE)
+SHADOW_FIELD(VM_ENTRY_INTR_INFO_FIELD)
+SHADOW_FIELD(VM_ENTRY_INSTRUCTION_LEN)
+SHADOW_FIELD(TPR_THRESHOLD)
+SHADOW_FIELD(GUEST_CS_LIMIT)
+SHADOW_FIELD(GUEST_CS_AR_BYTES)
+SHADOW_FIELD(GUEST_INTERRUPTIBILITY_INFO)
+SHADOW_FIELD(VMX_PREEMPTION_TIMER_VALUE)
 
 /* Natural width */
-SHADOW_FIELD_RO(EXIT_QUALIFICATION)
-SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS)
-SHADOW_FIELD_RW(GUEST_RIP)
-SHADOW_FIELD_RW(GUEST_RSP)
-SHADOW_FIELD_RW(GUEST_CR0)
-SHADOW_FIELD_RW(GUEST_CR3)
-SHADOW_FIELD_RW(GUEST_CR4)
-SHADOW_FIELD_RW(GUEST_RFLAGS)
-SHADOW_FIELD_RW(GUEST_CS_BASE)
-SHADOW_FIELD_RW(GUEST_ES_BASE)
-SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK)
-SHADOW_FIELD_RW(CR0_READ_SHADOW)
-SHADOW_FIELD_RW(CR4_READ_SHADOW)
-SHADOW_FIELD_RW(HOST_FS_BASE)
-SHADOW_FIELD_RW(HOST_GS_BASE)
+SHADOW_FIELD(EXIT_QUALIFICATION)
+SHADOW_FIELD(GUEST_LINEAR_ADDRESS)
+SHADOW_FIELD(GUEST_RIP)
+SHADOW_FIELD(GUEST_RSP)
+SHADOW_FIELD(GUEST_CR0)
+SHADOW_FIELD(GUEST_CR3)
+SHADOW_FIELD(GUEST_CR4)
+SHADOW_FIELD(GUEST_RFLAGS)
+SHADOW_FIELD(GUEST_CS_BASE)
+SHADOW_FIELD(GUEST_ES_BASE)
+SHADOW_FIELD(CR0_GUEST_HOST_MASK)
+SHADOW_FIELD(CR0_READ_SHADOW)
+SHADOW_FIELD(CR4_READ_SHADOW)
+SHADOW_FIELD(HOST_FS_BASE)
+SHADOW_FIELD(HOST_GS_BASE)
 
 /* 64-bit */
-SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS)
-SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH)
+SHADOW_FIELD(GUEST_PHYSICAL_ADDRESS)
+SHADOW_FIELD(GUEST_PHYSICAL_ADDRESS_HIGH)
 
-#undef SHADOW_FIELD_RO
-#undef SHADOW_FIELD_RW
+#undef SHADOW_FIELD