--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -663,7 +663,7 @@ struct hv_enlightened_vmcs {
u32 idt_vectoring_info_field;
u32 idt_vectoring_error_code;
u32 vm_exit_instruction_len;
- u32 vmx_instruction_info;
+ u32 vm_exit_instruction_info;
u64 exit_qualification;
u64 exit_io_instruction_ecx;
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -272,7 +272,7 @@ enum vmcs_field {
IDT_VECTORING_INFO_FIELD = 0x00004408,
IDT_VECTORING_ERROR_CODE = 0x0000440a,
VM_EXIT_INSTRUCTION_LEN = 0x0000440c,
- VMX_INSTRUCTION_INFO = 0x0000440e,
+ VM_EXIT_INSTRUCTION_INFO = 0x0000440e,
GUEST_ES_LIMIT = 0x00004800,
GUEST_CS_LIMIT = 0x00004802,
GUEST_SS_LIMIT = 0x00004804,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -554,7 +554,7 @@ struct __packed vmcs12 {
u32 idt_vectoring_info_field;
u32 idt_vectoring_error_code;
u32 vm_exit_instruction_len;
- u32 vmx_instruction_info;
+ u32 vm_exit_instruction_info;
u32 guest_es_limit;
u32 guest_cs_limit;
u32 guest_ss_limit;
@@ -711,7 +711,7 @@ static inline void vmx_check_vmcs12_offsets(void) {
CHECK_OFFSET(idt_vectoring_info_field, 824);
CHECK_OFFSET(idt_vectoring_error_code, 828);
CHECK_OFFSET(vm_exit_instruction_len, 832);
- CHECK_OFFSET(vmx_instruction_info, 836);
+ CHECK_OFFSET(vm_exit_instruction_info, 836);
CHECK_OFFSET(guest_es_limit, 840);
CHECK_OFFSET(guest_cs_limit, 844);
CHECK_OFFSET(guest_ss_limit, 848);
@@ -1194,7 +1194,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
- FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
+ FIELD(VM_EXIT_INSTRUCTION_INFO, vm_exit_instruction_info),
FIELD(GUEST_ES_LIMIT, guest_es_limit),
FIELD(GUEST_CS_LIMIT, guest_cs_limit),
FIELD(GUEST_SS_LIMIT, guest_ss_limit),
@@ -8176,29 +8176,31 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
* #UD or #GP.
*/
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
- unsigned long exit_qualification,
- u32 vmx_instruction_info, bool wr, gva_t *ret)
+ unsigned long exit_qualification,
+ u32 instr_info, bool wr, gva_t *ret)
{
gva_t off;
bool exn;
struct kvm_segment s;
/*
- * According to Vol. 3B, "Information for VM Exits Due to Instruction
- * Execution", on an exit, vmx_instruction_info holds most of the
- * addressing components of the operand. Only the displacement part
- * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
- * For how an actual address is calculated from all these components,
- * refer to Vol. 1, "Operand Addressing".
- */
- int scaling = vmx_instruction_info & 3;
- int addr_size = (vmx_instruction_info >> 7) & 7;
- bool is_reg = vmx_instruction_info & (1u << 10);
- int seg_reg = (vmx_instruction_info >> 15) & 7;
- int index_reg = (vmx_instruction_info >> 18) & 0xf;
- bool index_is_valid = !(vmx_instruction_info & (1u << 22));
- int base_reg = (vmx_instruction_info >> 23) & 0xf;
- bool base_is_valid = !(vmx_instruction_info & (1u << 27));
+ * According to Vol. 3B, "Information for VM Exits Due to
+ * Instruction Execution", on an exit, the VM-exit
+ * instruction-information field holds most of the addressing
+ * components of the operand. Only the displacement part is
+ * put in the exit qualification field (see 3B, "Basic VM-Exit
+ * Information"). For how an actual address is calculated
+ * from all these components, refer to Vol. 1, "Operand
+ * Addressing".
+ */
+ int scaling = instr_info & 3;
+ int addr_size = (instr_info >> 7) & 7;
+ bool is_reg = instr_info & (1u << 10);
+ int seg_reg = (instr_info >> 15) & 7;
+ int index_reg = (instr_info >> 18) & 0xf;
+ bool index_is_valid = !(instr_info & (1u << 22));
+ int base_reg = (instr_info >> 23) & 0xf;
+ bool base_is_valid = !(instr_info & (1u << 27));
if (is_reg) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -8272,7 +8274,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
struct x86_exception e;
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+ vmcs_read32(VM_EXIT_INSTRUCTION_INFO), false, &gva))
return 1;
if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
@@ -8880,7 +8882,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
* vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
* vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
* vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
- * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
+ * vmcs12->vm_exit_instruction_info = evmcs->vm_exit_instruction_info;
* vmcs12->exit_qualification = evmcs->exit_qualification;
* vmcs12->guest_linear_address = evmcs->guest_linear_address;
*
@@ -9037,7 +9039,7 @@ static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
- evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
+ evmcs->vm_exit_instruction_info = vmcs12->vm_exit_instruction_info;
evmcs->exit_qualification = vmcs12->exit_qualification;
@@ -9139,7 +9141,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
unsigned long field;
u64 field_value;
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ u32 instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
gva_t gva = 0;
struct vmcs12 *vmcs12;
@@ -9162,7 +9164,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
}
/* Decode instruction info and find the field to read */
- field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
return nested_vmx_failValid(vcpu,
@@ -9173,12 +9175,12 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
* Note that the number of bits actually copied is 32 or 64 depending
* on the guest's mode (32 or 64 bit), not on the given field's length.
*/
- if (vmx_instruction_info & (1u << 10)) {
- kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
- field_value);
+ if (instr_info & (1u << 10)) {
+ kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf),
+ field_value);
} else {
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, true, &gva))
+ instr_info, true, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
kvm_write_guest_virt_system(vcpu, gva, &field_value,
@@ -9195,7 +9197,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
gva_t gva;
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ u32 instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
/* The value to write might be 32 or 64 bits, depending on L1's long
* mode, and eventually we need to write that into a field of several
@@ -9213,12 +9215,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (vmx->nested.current_vmptr == -1ull)
return nested_vmx_failInvalid(vcpu);
- if (vmx_instruction_info & (1u << 10))
+ if (instr_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
- (((vmx_instruction_info) >> 3) & 0xf));
+ (((instr_info) >> 3) & 0xf));
else {
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, false, &gva))
+ instr_info, false, &gva))
return 1;
if (kvm_read_guest_virt(vcpu, gva, &field_value,
(is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
@@ -9228,7 +9230,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
}
- field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
/*
* If the vCPU supports "VMWRITE to any supported field in the
* VMCS," then the "read-only" fields are actually read/write.
@@ -9440,7 +9442,7 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
- u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ u32 instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
struct x86_exception e;
gva_t gva;
@@ -9466,7 +9468,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
static int handle_invept(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 vmx_instruction_info, types;
+ u32 instr_info, types;
unsigned long type;
gva_t gva;
struct x86_exception e;
@@ -9484,8 +9486,8 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (instr_info >> 28) & 0xf);
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
@@ -9497,7 +9499,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
+ instr_info, false, &gva))
return 1;
if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
@@ -9532,7 +9534,7 @@ static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 vmx_instruction_info;
+ u32 instr_info;
unsigned long type, types;
gva_t gva;
struct x86_exception e;
@@ -9552,8 +9554,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (instr_info >> 28) & 0xf);
types = (vmx->nested.msrs.vpid_caps &
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
@@ -9566,7 +9568,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
+ instr_info, false, &gva))
return 1;
if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
@@ -9609,7 +9611,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
static int handle_invpcid(struct kvm_vcpu *vcpu)
{
- u32 vmx_instruction_info;
+ u32 instr_info;
unsigned long type;
bool pcid_enabled;
gva_t gva;
@@ -9626,8 +9628,8 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
return 1;
}
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (instr_info >> 28) & 0xf);
if (type > 3) {
kvm_inject_gp(vcpu, 0);
@@ -9638,7 +9640,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
* is read even if it isn't needed (e.g., for type==all)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
+ instr_info, false, &gva))
return 1;
if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
@@ -10098,7 +10100,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, gpa_t bitmap)
{
- u32 vmx_instruction_info;
+ u32 instr_info;
unsigned long field;
u8 b;
@@ -10106,8 +10108,8 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
return true;
/* Decode instruction info and find the field to access */
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ instr_info = vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
+ field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
/* Out-of-range fields always cause a VM exit from L2 to L1 */
if (field >> 15)
@@ -13877,7 +13879,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->idt_vectoring_info_field = 0;
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ vmcs12->vm_exit_instruction_info =
+ vmcs_read32(VM_EXIT_INSTRUCTION_INFO);
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
vmcs12->launch_state = 1;
--- a/arch/x86/kvm/vmx_evmcs.h
+++ b/arch/x86/kvm/vmx_evmcs.h
@@ -247,7 +247,7 @@ static const struct evmcs_field vmcs_field_to_evmcs_1[] = {
HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
EVMCS1_FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
- EVMCS1_FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info,
+ EVMCS1_FIELD(VM_EXIT_INSTRUCTION_INFO, vm_exit_instruction_info,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
/* No mask defined in the spec (not used) */