@@ -225,6 +225,21 @@ struct nested_vmx {
struct level_state *l1_state;
};
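+
+/*
+ * Bits 14:13 of a VMCS field encoding give the field's width
+ * (Intel SDM Vol. 3, Appendix B): 0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
+ * 3 = natural-width.
+ */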
+enum vmcs_field_type {
+ VMCS_FIELD_TYPE_U16 = 0,
+ VMCS_FIELD_TYPE_U64 = 1,
+ VMCS_FIELD_TYPE_U32 = 2,
+ VMCS_FIELD_TYPE_ULONG = 3
+};
+
+#define VMCS_FIELD_LENGTH_OFFSET 13
+#define VMCS_FIELD_LENGTH_MASK 0x6000	/* bits 14:13 */
+
+static inline int vmcs_field_length(unsigned long field)
+{
+ return (field & VMCS_FIELD_LENGTH_MASK) >> VMCS_FIELD_LENGTH_OFFSET;
+}
+
struct vmcs {
u32 revision_id;
u32 abort;
@@ -288,6 +303,404 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
+#define SHADOW_VMCS_OFFSET(x) offsetof(struct shadow_vmcs, x)
+
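+/*
+ * Map each VMCS field encoding to the byte offset of the backing member
+ * in struct shadow_vmcs.  The "_HIGH" encodings of 64-bit fields refer to
+ * the upper half of the same member, i.e. offset + 4.
+ */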
+static unsigned short vmcs_field_to_offset_table[HOST_RIP+1] = {
+ [VIRTUAL_PROCESSOR_ID] =
+ SHADOW_VMCS_OFFSET(virtual_processor_id),
+ [GUEST_ES_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_es_selector),
+ [GUEST_CS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_cs_selector),
+ [GUEST_SS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_ss_selector),
+ [GUEST_DS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_ds_selector),
+ [GUEST_FS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_fs_selector),
+ [GUEST_GS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_gs_selector),
+ [GUEST_LDTR_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_ldtr_selector),
+ [GUEST_TR_SELECTOR] =
+ SHADOW_VMCS_OFFSET(guest_tr_selector),
+ [HOST_ES_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_es_selector),
+ [HOST_CS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_cs_selector),
+ [HOST_SS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_ss_selector),
+ [HOST_DS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_ds_selector),
+ [HOST_FS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_fs_selector),
+ [HOST_GS_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_gs_selector),
+ [HOST_TR_SELECTOR] =
+ SHADOW_VMCS_OFFSET(host_tr_selector),
+ [IO_BITMAP_A] =
+ SHADOW_VMCS_OFFSET(io_bitmap_a),
+ [IO_BITMAP_A_HIGH] =
+ SHADOW_VMCS_OFFSET(io_bitmap_a)+4,
+ [IO_BITMAP_B] =
+ SHADOW_VMCS_OFFSET(io_bitmap_b),
+ [IO_BITMAP_B_HIGH] =
+ SHADOW_VMCS_OFFSET(io_bitmap_b)+4,
+ [MSR_BITMAP] =
+ SHADOW_VMCS_OFFSET(msr_bitmap),
+ [MSR_BITMAP_HIGH] =
+ SHADOW_VMCS_OFFSET(msr_bitmap)+4,
+ [VM_EXIT_MSR_STORE_ADDR] =
+ SHADOW_VMCS_OFFSET(vm_exit_msr_store_addr),
+ [VM_EXIT_MSR_STORE_ADDR_HIGH] =
+ SHADOW_VMCS_OFFSET(vm_exit_msr_store_addr)+4,
+ [VM_EXIT_MSR_LOAD_ADDR] =
+ SHADOW_VMCS_OFFSET(vm_exit_msr_load_addr),
+ [VM_EXIT_MSR_LOAD_ADDR_HIGH] =
+ SHADOW_VMCS_OFFSET(vm_exit_msr_load_addr)+4,
+ [VM_ENTRY_MSR_LOAD_ADDR] =
+ SHADOW_VMCS_OFFSET(vm_entry_msr_load_addr),
+ [VM_ENTRY_MSR_LOAD_ADDR_HIGH] =
+ SHADOW_VMCS_OFFSET(vm_entry_msr_load_addr)+4,
+ [TSC_OFFSET] =
+ SHADOW_VMCS_OFFSET(tsc_offset),
+ [TSC_OFFSET_HIGH] =
+ SHADOW_VMCS_OFFSET(tsc_offset)+4,
+ [VIRTUAL_APIC_PAGE_ADDR] =
+ SHADOW_VMCS_OFFSET(virtual_apic_page_addr),
+ [VIRTUAL_APIC_PAGE_ADDR_HIGH] =
+ SHADOW_VMCS_OFFSET(virtual_apic_page_addr)+4,
+ [APIC_ACCESS_ADDR] =
+ SHADOW_VMCS_OFFSET(apic_access_addr),
+ [APIC_ACCESS_ADDR_HIGH] =
+ SHADOW_VMCS_OFFSET(apic_access_addr)+4,
+ [EPT_POINTER] =
+ SHADOW_VMCS_OFFSET(ept_pointer),
+ [EPT_POINTER_HIGH] =
+ SHADOW_VMCS_OFFSET(ept_pointer)+4,
+ [GUEST_PHYSICAL_ADDRESS] =
+ SHADOW_VMCS_OFFSET(guest_physical_address),
+ [GUEST_PHYSICAL_ADDRESS_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_physical_address)+4,
+ [VMCS_LINK_POINTER] =
+ SHADOW_VMCS_OFFSET(vmcs_link_pointer),
+ [VMCS_LINK_POINTER_HIGH] =
+ SHADOW_VMCS_OFFSET(vmcs_link_pointer)+4,
+ [GUEST_IA32_DEBUGCTL] =
+ SHADOW_VMCS_OFFSET(guest_ia32_debugctl),
+ [GUEST_IA32_DEBUGCTL_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_ia32_debugctl)+4,
+ [GUEST_IA32_PAT] =
+ SHADOW_VMCS_OFFSET(guest_ia32_pat),
+ [GUEST_IA32_PAT_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_ia32_pat)+4,
+ [GUEST_PDPTR0] =
+ SHADOW_VMCS_OFFSET(guest_pdptr0),
+ [GUEST_PDPTR0_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_pdptr0)+4,
+ [GUEST_PDPTR1] =
+ SHADOW_VMCS_OFFSET(guest_pdptr1),
+ [GUEST_PDPTR1_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_pdptr1)+4,
+ [GUEST_PDPTR2] =
+ SHADOW_VMCS_OFFSET(guest_pdptr2),
+ [GUEST_PDPTR2_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_pdptr2)+4,
+ [GUEST_PDPTR3] =
+ SHADOW_VMCS_OFFSET(guest_pdptr3),
+ [GUEST_PDPTR3_HIGH] =
+ SHADOW_VMCS_OFFSET(guest_pdptr3)+4,
+ [HOST_IA32_PAT] =
+ SHADOW_VMCS_OFFSET(host_ia32_pat),
+ [HOST_IA32_PAT_HIGH] =
+ SHADOW_VMCS_OFFSET(host_ia32_pat)+4,
+ [PIN_BASED_VM_EXEC_CONTROL] =
+ SHADOW_VMCS_OFFSET(pin_based_vm_exec_control),
+ [CPU_BASED_VM_EXEC_CONTROL] =
+ SHADOW_VMCS_OFFSET(cpu_based_vm_exec_control),
+ [EXCEPTION_BITMAP] =
+ SHADOW_VMCS_OFFSET(exception_bitmap),
+ [PAGE_FAULT_ERROR_CODE_MASK] =
+ SHADOW_VMCS_OFFSET(page_fault_error_code_mask),
+ [PAGE_FAULT_ERROR_CODE_MATCH] =
+ SHADOW_VMCS_OFFSET(page_fault_error_code_match),
+ [CR3_TARGET_COUNT] =
+ SHADOW_VMCS_OFFSET(cr3_target_count),
+ [VM_EXIT_CONTROLS] =
+ SHADOW_VMCS_OFFSET(vm_exit_controls),
+ [VM_EXIT_MSR_STORE_COUNT] =
+ SHADOW_VMCS_OFFSET(vm_exit_msr_store_count),
+ [VM_EXIT_MSR_LOAD_COUNT] =
+ SHADOW_VMCS_OFFSET(vm_exit_msr_load_count),
+ [VM_ENTRY_CONTROLS] =
+ SHADOW_VMCS_OFFSET(vm_entry_controls),
+ [VM_ENTRY_MSR_LOAD_COUNT] =
+ SHADOW_VMCS_OFFSET(vm_entry_msr_load_count),
+ [VM_ENTRY_INTR_INFO_FIELD] =
+ SHADOW_VMCS_OFFSET(vm_entry_intr_info_field),
+ [VM_ENTRY_EXCEPTION_ERROR_CODE] =
+ SHADOW_VMCS_OFFSET(vm_entry_exception_error_code),
+ [VM_ENTRY_INSTRUCTION_LEN] =
+ SHADOW_VMCS_OFFSET(vm_entry_instruction_len),
+ [TPR_THRESHOLD] =
+ SHADOW_VMCS_OFFSET(tpr_threshold),
+ [SECONDARY_VM_EXEC_CONTROL] =
+ SHADOW_VMCS_OFFSET(secondary_vm_exec_control),
+ [VM_INSTRUCTION_ERROR] =
+ SHADOW_VMCS_OFFSET(vm_instruction_error),
+ [VM_EXIT_REASON] =
+ SHADOW_VMCS_OFFSET(vm_exit_reason),
+ [VM_EXIT_INTR_INFO] =
+ SHADOW_VMCS_OFFSET(vm_exit_intr_info),
+ [VM_EXIT_INTR_ERROR_CODE] =
+ SHADOW_VMCS_OFFSET(vm_exit_intr_error_code),
+ [IDT_VECTORING_INFO_FIELD] =
+ SHADOW_VMCS_OFFSET(idt_vectoring_info_field),
+ [IDT_VECTORING_ERROR_CODE] =
+ SHADOW_VMCS_OFFSET(idt_vectoring_error_code),
+ [VM_EXIT_INSTRUCTION_LEN] =
+ SHADOW_VMCS_OFFSET(vm_exit_instruction_len),
+ [VMX_INSTRUCTION_INFO] =
+ SHADOW_VMCS_OFFSET(vmx_instruction_info),
+ [GUEST_ES_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_es_limit),
+ [GUEST_CS_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_cs_limit),
+ [GUEST_SS_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_ss_limit),
+ [GUEST_DS_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_ds_limit),
+ [GUEST_FS_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_fs_limit),
+ [GUEST_GS_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_gs_limit),
+ [GUEST_LDTR_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_ldtr_limit),
+ [GUEST_TR_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_tr_limit),
+ [GUEST_GDTR_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_gdtr_limit),
+ [GUEST_IDTR_LIMIT] =
+ SHADOW_VMCS_OFFSET(guest_idtr_limit),
+ [GUEST_ES_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_es_ar_bytes),
+ [GUEST_CS_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_cs_ar_bytes),
+ [GUEST_SS_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_ss_ar_bytes),
+ [GUEST_DS_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_ds_ar_bytes),
+ [GUEST_FS_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_fs_ar_bytes),
+ [GUEST_GS_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_gs_ar_bytes),
+ [GUEST_LDTR_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_ldtr_ar_bytes),
+ [GUEST_TR_AR_BYTES] =
+ SHADOW_VMCS_OFFSET(guest_tr_ar_bytes),
+ [GUEST_INTERRUPTIBILITY_INFO] =
+ SHADOW_VMCS_OFFSET(guest_interruptibility_info),
+ [GUEST_ACTIVITY_STATE] =
+ SHADOW_VMCS_OFFSET(guest_activity_state),
+ [GUEST_SYSENTER_CS] =
+ SHADOW_VMCS_OFFSET(guest_sysenter_cs),
+ [HOST_IA32_SYSENTER_CS] =
+ SHADOW_VMCS_OFFSET(host_ia32_sysenter_cs),
+ [CR0_GUEST_HOST_MASK] =
+ SHADOW_VMCS_OFFSET(cr0_guest_host_mask),
+ [CR4_GUEST_HOST_MASK] =
+ SHADOW_VMCS_OFFSET(cr4_guest_host_mask),
+ [CR0_READ_SHADOW] =
+ SHADOW_VMCS_OFFSET(cr0_read_shadow),
+ [CR4_READ_SHADOW] =
+ SHADOW_VMCS_OFFSET(cr4_read_shadow),
+ [CR3_TARGET_VALUE0] =
+ SHADOW_VMCS_OFFSET(cr3_target_value0),
+ [CR3_TARGET_VALUE1] =
+ SHADOW_VMCS_OFFSET(cr3_target_value1),
+ [CR3_TARGET_VALUE2] =
+ SHADOW_VMCS_OFFSET(cr3_target_value2),
+ [CR3_TARGET_VALUE3] =
+ SHADOW_VMCS_OFFSET(cr3_target_value3),
+ [EXIT_QUALIFICATION] =
+ SHADOW_VMCS_OFFSET(exit_qualification),
+ [GUEST_LINEAR_ADDRESS] =
+ SHADOW_VMCS_OFFSET(guest_linear_address),
+ [GUEST_CR0] =
+ SHADOW_VMCS_OFFSET(guest_cr0),
+ [GUEST_CR3] =
+ SHADOW_VMCS_OFFSET(guest_cr3),
+ [GUEST_CR4] =
+ SHADOW_VMCS_OFFSET(guest_cr4),
+ [GUEST_ES_BASE] =
+ SHADOW_VMCS_OFFSET(guest_es_base),
+ [GUEST_CS_BASE] =
+ SHADOW_VMCS_OFFSET(guest_cs_base),
+ [GUEST_SS_BASE] =
+ SHADOW_VMCS_OFFSET(guest_ss_base),
+ [GUEST_DS_BASE] =
+ SHADOW_VMCS_OFFSET(guest_ds_base),
+ [GUEST_FS_BASE] =
+ SHADOW_VMCS_OFFSET(guest_fs_base),
+ [GUEST_GS_BASE] =
+ SHADOW_VMCS_OFFSET(guest_gs_base),
+ [GUEST_LDTR_BASE] =
+ SHADOW_VMCS_OFFSET(guest_ldtr_base),
+ [GUEST_TR_BASE] =
+ SHADOW_VMCS_OFFSET(guest_tr_base),
+ [GUEST_GDTR_BASE] =
+ SHADOW_VMCS_OFFSET(guest_gdtr_base),
+ [GUEST_IDTR_BASE] =
+ SHADOW_VMCS_OFFSET(guest_idtr_base),
+ [GUEST_DR7] =
+ SHADOW_VMCS_OFFSET(guest_dr7),
+ [GUEST_RSP] =
+ SHADOW_VMCS_OFFSET(guest_rsp),
+ [GUEST_RIP] =
+ SHADOW_VMCS_OFFSET(guest_rip),
+ [GUEST_RFLAGS] =
+ SHADOW_VMCS_OFFSET(guest_rflags),
+ [GUEST_PENDING_DBG_EXCEPTIONS] =
+ SHADOW_VMCS_OFFSET(guest_pending_dbg_exceptions),
+ [GUEST_SYSENTER_ESP] =
+ SHADOW_VMCS_OFFSET(guest_sysenter_esp),
+ [GUEST_SYSENTER_EIP] =
+ SHADOW_VMCS_OFFSET(guest_sysenter_eip),
+ [HOST_CR0] =
+ SHADOW_VMCS_OFFSET(host_cr0),
+ [HOST_CR3] =
+ SHADOW_VMCS_OFFSET(host_cr3),
+ [HOST_CR4] =
+ SHADOW_VMCS_OFFSET(host_cr4),
+ [HOST_FS_BASE] =
+ SHADOW_VMCS_OFFSET(host_fs_base),
+ [HOST_GS_BASE] =
+ SHADOW_VMCS_OFFSET(host_gs_base),
+ [HOST_TR_BASE] =
+ SHADOW_VMCS_OFFSET(host_tr_base),
+ [HOST_GDTR_BASE] =
+ SHADOW_VMCS_OFFSET(host_gdtr_base),
+ [HOST_IDTR_BASE] =
+ SHADOW_VMCS_OFFSET(host_idtr_base),
+ [HOST_IA32_SYSENTER_ESP] =
+ SHADOW_VMCS_OFFSET(host_ia32_sysenter_esp),
+ [HOST_IA32_SYSENTER_EIP] =
+ SHADOW_VMCS_OFFSET(host_ia32_sysenter_eip),
+ [HOST_RSP] =
+ SHADOW_VMCS_OFFSET(host_rsp),
+ [HOST_RIP] =
+ SHADOW_VMCS_OFFSET(host_rip),
+};
+
+/*
+ * Return the shadow_vmcs byte offset for @field, or -1 if the encoding is
+ * not handled (the return type must be signed for -1 to survive; a table
+ * entry of 0 marks an unhandled encoding).
+ */
+static inline int vmcs_field_to_offset(unsigned long field)
+{
+ if (field > HOST_RIP || vmcs_field_to_offset_table[field] == 0) {
+ printk(KERN_ERR "invalid vmcs encoding 0x%lx\n", field);
+ return -1;
+ }
+
+ return vmcs_field_to_offset_table[field];
+}
+
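+/*
+ * Accessors for the shadow VMCS.  The caller is responsible for mapping
+ * it first via nested_map_shadow_vmcs(); on any error the readers return
+ * -1 and the writers do nothing.
+ */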
+static inline unsigned long nested_vmcs_readl(struct kvm_vcpu *vcpu,
+ unsigned long field)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int offset = vmcs_field_to_offset(field);
+
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_ERR "%s invalid nested vmcs\n", __func__);
+ return -1;
+ }
+
+ if (offset < 0)
+ return -1;
+
+ return *(unsigned long *)((char *)vmx->nested.l2_state->shadow_vmcs +
+ offset);
+}
+
+static inline u16 nested_vmcs_read16(struct kvm_vcpu *vcpu,
+ unsigned long field)
+{
+ return nested_vmcs_readl(vcpu, field);
+}
+
+static inline u32 nested_vmcs_read32(struct kvm_vcpu *vcpu, unsigned long field)
+{
+ return nested_vmcs_readl(vcpu, field);
+}
+
+static inline u64 nested_vmcs_read64(struct kvm_vcpu *vcpu, unsigned long field)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int offset = vmcs_field_to_offset(field);
+
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_ERR "%s invalid nested vmcs\n", __func__);
+ return -1;
+ }
+
+ if (offset < 0)
+ return -1;
+
+ return *(u64 *)((char *)vmx->nested.l2_state->shadow_vmcs + offset);
+}
+
+static inline void nested_vmcs_writel(struct kvm_vcpu *vcpu,
+ unsigned long field, unsigned long value)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int offset = vmcs_field_to_offset(field);
+
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_ERR "%s invalid nested vmcs\n", __func__);
+ return;
+ }
+
+ if (offset < 0)
+ return;
+
+ *(unsigned long *)((char *)vmx->nested.l2_state->shadow_vmcs +
+ offset) = value;
+}
+
+static inline void nested_vmcs_write16(struct kvm_vcpu *vcpu,
+ unsigned long field, u16 value)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int offset = vmcs_field_to_offset(field);
+
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_ERR "%s invalid nested vmcs\n", __func__);
+ return;
+ }
+
+ if (offset < 0)
+ return;
+
+ *(u16 *)((char *)vmx->nested.l2_state->shadow_vmcs + offset) = value;
+}
+
+static inline void nested_vmcs_write32(struct kvm_vcpu *vcpu,
+ unsigned long field, u32 value)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int offset = vmcs_field_to_offset(field);
+
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_ERR "%s invalid nested vmcs\n", __func__);
+ return;
+ }
+
+ if (offset < 0)
+ return;
+
+ *(u32 *)((char *)vmx->nested.l2_state->shadow_vmcs + offset) = value;
+}
+
+static inline void nested_vmcs_write64(struct kvm_vcpu *vcpu,
+ unsigned long field, u64 value)
+{
+#ifdef CONFIG_X86_64
+ nested_vmcs_writel(vcpu, field, value);
+#else /* nested: 32 bit not actually tested */
+ nested_vmcs_writel(vcpu, field, value);
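+ /* field + 1 is the "_HIGH" access encoding for the upper 32 bits */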
+ nested_vmcs_writel(vcpu, field+1, value >> 32);
+#endif
+}
+
static struct page *nested_get_page(struct kvm_vcpu *vcpu,
u64 vmcs_addr)
{
@@ -307,6 +720,50 @@ static struct page *nested_get_page(struct kvm_vcpu *vcpu,
}
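+
+/*
+ * Map the guest page holding L1's current VMCS (vmx->nested.vmptr) so it
+ * can be accessed as a struct shadow_vmcs.  The mapping is atomic, so it
+ * must not be held across anything that may sleep and must be released
+ * with nested_unmap_shadow_vmcs() on every exit path.
+ */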
+static int nested_map_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct page *vmcs_page;
+
+ /* Check before taking a page reference, so nothing can leak. */
+ if (vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_INFO "%s: shadow vmcs already mapped\n", __func__);
+ return 0;
+ }
+
+ vmcs_page = nested_get_page(vcpu, vmx->nested.vmptr);
+ if (vmcs_page == NULL) {
+ printk(KERN_INFO "%s: failure in nested_get_page\n", __func__);
+ return 0;
+ }
+
+ vmx->nested.l2_state->shadow_vmcs = kmap_atomic(vmcs_page, KM_USER0);
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_INFO "%s: error in kmap_atomic\n", __func__);
+ kvm_release_page_clean(vmcs_page);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void nested_unmap_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ struct page *page;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.l2_state->shadow_vmcs) {
+ printk(KERN_INFO "%s: shadow vmcs already unmapped\n", __func__);
+ return;
+ }
+
+ page = kmap_atomic_to_page(vmx->nested.l2_state->shadow_vmcs);
+
+ kunmap_atomic(vmx->nested.l2_state->shadow_vmcs, KM_USER0);
+
+ kvm_release_page_dirty(page);
+
+ vmx->nested.l2_state->shadow_vmcs = NULL;
+}
+
static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);
@@ -3550,6 +4007,26 @@ static void clear_rflags_cf_zf(struct kvm_vcpu *vcpu)
vmx_set_rflags(vcpu, rflags);
}
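+
+/*
+ * VMX instructions report failure through rflags: VMfailInvalid sets CF,
+ * while VMfailValid sets ZF and deposits an error number in the
+ * VM-instruction error field; the remaining arithmetic flags are cleared
+ * in both cases (Intel SDM Vol. 2, VMX instruction conventions).
+ */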
+static void set_rflags_to_vmx_fail_invalid(struct kvm_vcpu *vcpu)
+{
+ unsigned long rflags;
+
+ rflags = vmx_get_rflags(vcpu);
+ rflags |= X86_EFLAGS_CF;
+ rflags &= ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF);
+ vmx_set_rflags(vcpu, rflags);
+}
+
+static void set_rflags_to_vmx_fail_valid(struct kvm_vcpu *vcpu)
+{
+ unsigned long rflags;
+
+ rflags = vmx_get_rflags(vcpu);
+ rflags |= X86_EFLAGS_ZF;
+ rflags &= ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_CF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF);
+ vmx_set_rflags(vcpu, rflags);
+}
+
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
if (!nested_vmx_check_permission(vcpu))
@@ -3563,6 +4040,116 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
return 1;
}
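+
+/*
+ * Emulate VMREAD on behalf of the guest hypervisor.  This implementation
+ * assumes the register form of the instruction, with the field encoding
+ * in RDX and the result placed in RAX (RAX:RBX on 32-bit hosts for 64-bit
+ * fields); general operand decoding is not handled here.
+ */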
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+#ifndef CONFIG_X86_64
+ u64 value;
+#endif
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (!nested_map_shadow_vmcs(vcpu)) {
+ printk(KERN_INFO "%s invalid shadow vmcs\n", __func__);
+ set_rflags_to_vmx_fail_invalid(vcpu);
+ /* a failing VMREAD still completes, so advance the guest rip */
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ switch (vmcs_field_length(vcpu->arch.regs[VCPU_REGS_RDX])) {
+ case VMCS_FIELD_TYPE_U16:
+ vcpu->arch.regs[VCPU_REGS_RAX] =
+ nested_vmcs_read16(vcpu,
+ vcpu->arch.regs[VCPU_REGS_RDX]);
+ break;
+ case VMCS_FIELD_TYPE_U32:
+ vcpu->arch.regs[VCPU_REGS_RAX] =
+ nested_vmcs_read32(vcpu,
+ vcpu->arch.regs[VCPU_REGS_RDX]);
+ break;
+ case VMCS_FIELD_TYPE_U64:
+#ifdef CONFIG_X86_64
+ vcpu->arch.regs[VCPU_REGS_RAX] =
+ nested_vmcs_read64(vcpu,
+ vcpu->arch.regs[VCPU_REGS_RDX]);
+#else /* nested: 32 bit not actually tested */
+ value = nested_vmcs_read64(vcpu,
+ vcpu->arch.regs[VCPU_REGS_RDX]);
+ vcpu->arch.regs[VCPU_REGS_RAX] = value;
+ vcpu->arch.regs[VCPU_REGS_RBX] = value >> 32;
+#endif
+ break;
+ case VMCS_FIELD_TYPE_ULONG:
+ vcpu->arch.regs[VCPU_REGS_RAX] =
+ nested_vmcs_readl(vcpu,
+ vcpu->arch.regs[VCPU_REGS_RDX]);
+ break;
+ default:
+ printk(KERN_INFO "%s invalid field\n", __func__);
+ set_rflags_to_vmx_fail_valid(vcpu);
+ /* 12: VMREAD/VMWRITE from/to unsupported VMCS component */
+ nested_vmcs_write32(vcpu, VM_INSTRUCTION_ERROR, 12);
+ skip_emulated_instruction(vcpu);
+ nested_unmap_shadow_vmcs(vcpu);
+ return 1;
+ }
+
+ clear_rflags_cf_zf(vcpu);
+ skip_emulated_instruction(vcpu);
+ nested_unmap_shadow_vmcs(vcpu);
+ return 1;
+}
+
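+/*
+ * Emulate VMWRITE for the guest hypervisor, mirroring handle_vmread:
+ * the field encoding is taken from RDX and the value to store from RAX
+ * (RAX:RBX on 32-bit hosts for 64-bit fields).
+ */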
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+#ifndef CONFIG_X86_64
+ u64 value;
+#endif
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (!nested_map_shadow_vmcs(vcpu)) {
+ printk(KERN_INFO "%s invalid shadow vmcs\n", __func__);
+ set_rflags_to_vmx_fail_invalid(vcpu);
+ /* a failing VMWRITE still completes, so advance the guest rip */
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ switch (vmcs_field_length(vcpu->arch.regs[VCPU_REGS_RDX])) {
+ case VMCS_FIELD_TYPE_U16:
+ nested_vmcs_write16(vcpu, vcpu->arch.regs[VCPU_REGS_RDX],
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+ break;
+ case VMCS_FIELD_TYPE_U32:
+ nested_vmcs_write32(vcpu, vcpu->arch.regs[VCPU_REGS_RDX],
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+ break;
+ case VMCS_FIELD_TYPE_U64:
+#ifdef CONFIG_X86_64
+ nested_vmcs_write64(vcpu, vcpu->arch.regs[VCPU_REGS_RDX],
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+#else /* nested: 32 bit not actually tested */
+ /* cast before shifting: unsigned long is 32 bits on these hosts */
+ value = vcpu->arch.regs[VCPU_REGS_RAX] |
+ ((u64)vcpu->arch.regs[VCPU_REGS_RBX] << 32);
+ nested_vmcs_write64(vcpu,
+ vcpu->arch.regs[VCPU_REGS_RDX], value);
+#endif
+ break;
+ case VMCS_FIELD_TYPE_ULONG:
+ nested_vmcs_writel(vcpu, vcpu->arch.regs[VCPU_REGS_RDX],
+ vcpu->arch.regs[VCPU_REGS_RAX]);
+ break;
+ default:
+ printk(KERN_INFO "%s invalid field\n", __func__);
+ set_rflags_to_vmx_fail_valid(vcpu);
+ /* 12: VMREAD/VMWRITE from/to unsupported VMCS component */
+ nested_vmcs_write32(vcpu, VM_INSTRUCTION_ERROR, 12);
+ skip_emulated_instruction(vcpu);
+ nested_unmap_shadow_vmcs(vcpu);
+ return 1;
+ }
+
+ clear_rflags_cf_zf(vcpu);
+ skip_emulated_instruction(vcpu);
+ nested_unmap_shadow_vmcs(vcpu);
+ return 1;
+}
+
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3950,9 +4537,9 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
[EXIT_REASON_VMPTRLD] = handle_vmptrld,
[EXIT_REASON_VMPTRST] = handle_vmptrst,
- [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmread,
[EXIT_REASON_VMRESUME] = handle_vmx_insn,
- [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmwrite,
[EXIT_REASON_VMOFF] = handle_vmoff,
[EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,