--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -53,14 +53,14 @@ struct shadow_vmcs_field {
u16 offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
-#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
+#define SHADOW_FIELD_RO(x, y, c) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
static struct shadow_vmcs_field shadow_read_write_fields[] = {
-#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
+#define SHADOW_FIELD_RW(x, y, c) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
@@ -83,6 +83,17 @@ static void init_vmcs_shadow_fields(void)
pr_err("Missing field from shadow_read_only_field %x\n",
field + 1);

+ switch (field) {
+#define SHADOW_FIELD_RO(x, y, c) \
+ case x: \
+ if (!(c)) \
+ continue; \
+ break;
+#include "vmcs_shadow_fields.h"
+ default:
+ break;
+ }
+
clear_bit(field, vmx_vmread_bitmap);
if (field & 1)
#ifdef CONFIG_X86_64
@@ -114,18 +125,12 @@ static void init_vmcs_shadow_fields(void)
* on bare metal.
*/
switch (field) {
- case GUEST_PML_INDEX:
- if (!cpu_has_vmx_pml())
- continue;
- break;
- case VMX_PREEMPTION_TIMER_VALUE:
- if (!cpu_has_vmx_preemption_timer())
- continue;
- break;
- case GUEST_INTR_STATUS:
- if (!cpu_has_vmx_apicv())
- continue;
+#define SHADOW_FIELD_RW(x, y, c) \
+ case x: \
+ if (!(c)) \
+ continue; \
break;
+#include "vmcs_shadow_fields.h"
default:
break;
}
@@ -1585,6 +1590,18 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)

for (i = 0; i < max_shadow_read_write_fields; i++) {
field = shadow_read_write_fields[i];
+
+ switch (field.encoding) {
+#define SHADOW_FIELD_RW(x, y, c) \
+ case x: \
+ if (!(c)) \
+ continue; \
+ break;
+#include "vmcs_shadow_fields.h"
+ default:
+ break;
+ }
+
val = __vmcs_readl(field.encoding);
vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
}
@@ -1619,6 +1636,23 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
for (q = 0; q < ARRAY_SIZE(fields); q++) {
for (i = 0; i < max_fields[q]; i++) {
field = fields[q][i];
+
+ switch (field.encoding) {
+#define SHADOW_FIELD_RO(x, y, c) \
+ case x: \
+ if (!(c)) \
+ continue; \
+ break;
+#define SHADOW_FIELD_RW(x, y, c) \
+ case x: \
+ if (!(c)) \
+ continue; \
+ break;
+#include "vmcs_shadow_fields.h"
+ default:
+ break;
+ }
+
val = vmcs12_read_any(vmcs12, field.encoding,
field.offset);
__vmcs_writel(field.encoding, val);
@@ -5492,9 +5526,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
static bool is_shadow_field_rw(unsigned long field)
{
switch (field) {
-#define SHADOW_FIELD_RW(x, y) case x:
+#define SHADOW_FIELD_RW(x, y, c) \
+ case x: \
+ return c;
#include "vmcs_shadow_fields.h"
- return true;
default:
break;
}
@@ -5504,9 +5539,10 @@ static bool is_shadow_field_rw(unsigned long field)
static bool is_shadow_field_ro(unsigned long field)
{
switch (field) {
-#define SHADOW_FIELD_RO(x, y) case x:
+#define SHADOW_FIELD_RO(x, y, c) \
+ case x: \
+ return c;
#include "vmcs_shadow_fields.h"
- return true;
default:
break;
}
--- a/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
@@ -3,10 +3,10 @@ BUILD_BUG_ON(1)
#endif

#ifndef SHADOW_FIELD_RO
-#define SHADOW_FIELD_RO(x, y)
+#define SHADOW_FIELD_RO(x, y, c)
#endif
#ifndef SHADOW_FIELD_RW
-#define SHADOW_FIELD_RW(x, y)
+#define SHADOW_FIELD_RW(x, y, c)
#endif

/*
@@ -32,48 +32,48 @@ BUILD_BUG_ON(1)
*/

/* 16-bits */
-SHADOW_FIELD_RW(GUEST_INTR_STATUS, guest_intr_status)
-SHADOW_FIELD_RW(GUEST_PML_INDEX, guest_pml_index)
-SHADOW_FIELD_RW(HOST_FS_SELECTOR, host_fs_selector)
-SHADOW_FIELD_RW(HOST_GS_SELECTOR, host_gs_selector)
+SHADOW_FIELD_RW(GUEST_INTR_STATUS, guest_intr_status, cpu_has_vmx_apicv())
+SHADOW_FIELD_RW(GUEST_PML_INDEX, guest_pml_index, cpu_has_vmx_pml())
+SHADOW_FIELD_RW(HOST_FS_SELECTOR, host_fs_selector, true)
+SHADOW_FIELD_RW(HOST_GS_SELECTOR, host_gs_selector, true)

/* 32-bits */
-SHADOW_FIELD_RO(VM_EXIT_REASON, vm_exit_reason)
-SHADOW_FIELD_RO(VM_EXIT_INTR_INFO, vm_exit_intr_info)
-SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len)
-SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field)
-SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code)
-SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code)
-SHADOW_FIELD_RO(GUEST_CS_AR_BYTES, guest_cs_ar_bytes)
-SHADOW_FIELD_RO(GUEST_SS_AR_BYTES, guest_ss_ar_bytes)
-SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control)
-SHADOW_FIELD_RW(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control)
-SHADOW_FIELD_RW(EXCEPTION_BITMAP, exception_bitmap)
-SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code)
-SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field)
-SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len)
-SHADOW_FIELD_RW(TPR_THRESHOLD, tpr_threshold)
-SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info)
-SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value)
+SHADOW_FIELD_RO(VM_EXIT_REASON, vm_exit_reason, true)
+SHADOW_FIELD_RO(VM_EXIT_INTR_INFO, vm_exit_intr_info, true)
+SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len, true)
+SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code, true)
+SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field, true)
+SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code, true)
+SHADOW_FIELD_RO(GUEST_CS_AR_BYTES, guest_cs_ar_bytes, true)
+SHADOW_FIELD_RO(GUEST_SS_AR_BYTES, guest_ss_ar_bytes, true)
+SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control, true)
+SHADOW_FIELD_RW(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control, true)
+SHADOW_FIELD_RW(EXCEPTION_BITMAP, exception_bitmap, true)
+SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code, true)
+SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field, true)
+SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len, true)
+SHADOW_FIELD_RW(TPR_THRESHOLD, tpr_threshold, true)
+SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info, true)
+SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value, cpu_has_vmx_preemption_timer())

/* Natural width */
-SHADOW_FIELD_RO(EXIT_QUALIFICATION, exit_qualification)
-SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS, guest_linear_address)
-SHADOW_FIELD_RW(GUEST_RIP, guest_rip)
-SHADOW_FIELD_RW(GUEST_RSP, guest_rsp)
-SHADOW_FIELD_RW(GUEST_CR0, guest_cr0)
-SHADOW_FIELD_RW(GUEST_CR3, guest_cr3)
-SHADOW_FIELD_RW(GUEST_CR4, guest_cr4)
-SHADOW_FIELD_RW(GUEST_RFLAGS, guest_rflags)
-SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK, cr0_guest_host_mask)
-SHADOW_FIELD_RW(CR0_READ_SHADOW, cr0_read_shadow)
-SHADOW_FIELD_RW(CR4_READ_SHADOW, cr4_read_shadow)
-SHADOW_FIELD_RW(HOST_FS_BASE, host_fs_base)
-SHADOW_FIELD_RW(HOST_GS_BASE, host_gs_base)
+SHADOW_FIELD_RO(EXIT_QUALIFICATION, exit_qualification, true)
+SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS, guest_linear_address, true)
+SHADOW_FIELD_RW(GUEST_RIP, guest_rip, true)
+SHADOW_FIELD_RW(GUEST_RSP, guest_rsp, true)
+SHADOW_FIELD_RW(GUEST_CR0, guest_cr0, true)
+SHADOW_FIELD_RW(GUEST_CR3, guest_cr3, true)
+SHADOW_FIELD_RW(GUEST_CR4, guest_cr4, true)
+SHADOW_FIELD_RW(GUEST_RFLAGS, guest_rflags, true)
+SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK, cr0_guest_host_mask, true)
+SHADOW_FIELD_RW(CR0_READ_SHADOW, cr0_read_shadow, true)
+SHADOW_FIELD_RW(CR4_READ_SHADOW, cr4_read_shadow, true)
+SHADOW_FIELD_RW(HOST_FS_BASE, host_fs_base, true)
+SHADOW_FIELD_RW(HOST_GS_BASE, host_gs_base, true)

/* 64-bit */
-SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS, guest_physical_address)
-SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH, guest_physical_address)
+SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS, guest_physical_address, true)
+SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH, guest_physical_address, true)

#undef SHADOW_FIELD_RO
#undef SHADOW_FIELD_RW
Add a prerequisite for accessing the VMCS fields referenced in the
SHADOW_FIELD_R[OW] macros, because a VMCS field may not exist on some CPUs.

Signed-off-by: Xin Li <xin3.li@intel.com>
---
 arch/x86/kvm/vmx/nested.c             | 70 ++++++++++++++++++------
 arch/x86/kvm/vmx/vmcs_shadow_fields.h | 76 +++++++++++++--------------
 2 files changed, 91 insertions(+), 55 deletions(-)
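As a reference for the pattern being extended, below is a minimal, self-contained
sketch (standalone C with made-up demo_* names and encodings; not KVM code) of an
X-macro field list that carries a per-entry prerequisite. It shows how one list can
generate both the compile-time field table and a run-time "is this field usable on
this CPU?" check. The kernel does this by re-including vmcs_shadow_fields.h with
SHADOW_FIELD_R[OW] redefined before each use; for self-containment the sketch
passes the per-entry macro as a parameter instead.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the cpu_has_vmx_*() capability checks (hypothetical). */
static bool demo_has_pml(void)   { return false; }
static bool demo_has_timer(void) { return true;  }

struct demo_vmcs12 {
	unsigned long pml_index;
	unsigned long timer_value;
	unsigned long rip;
};

/* One list, three columns: encoding, struct member, prerequisite. */
#define DEMO_FIELDS(F)					\
	F(0x10, pml_index,   demo_has_pml())		\
	F(0x20, timer_value, demo_has_timer())		\
	F(0x30, rip,         true)

struct demo_field {
	unsigned int encoding;
	size_t offset;
};

/* First expansion: build the field table; the prerequisite is ignored here. */
#define DEMO_ENTRY(x, y, c) { x, offsetof(struct demo_vmcs12, y) },
static const struct demo_field demo_fields[] = {
	DEMO_FIELDS(DEMO_ENTRY)
};
#undef DEMO_ENTRY

/* Second expansion: evaluate the prerequisite at run time. */
static bool demo_field_supported(unsigned int encoding)
{
	switch (encoding) {
#define DEMO_CASE(x, y, c) case x: return (c);
	DEMO_FIELDS(DEMO_CASE)
#undef DEMO_CASE
	default:
		return false;
	}
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(demo_fields) / sizeof(demo_fields[0]); i++) {
		unsigned int enc = demo_fields[i].encoding;

		printf("field %#x at offset %zu: %s\n",
		       enc, demo_fields[i].offset,
		       demo_field_supported(enc) ? "shadowed" : "skipped");
	}
	return 0;
}

Tying the prerequisite to each list entry keeps the field tables, the
vmread/vmwrite bitmap setup, and the copy_shadow_to_vmcs12()/copy_vmcs12_to_shadow()
paths in agreement automatically, instead of open-coding the GUEST_PML_INDEX,
VMX_PREEMPTION_TIMER_VALUE and GUEST_INTR_STATUS special cases in each place.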