@@ -1309,6 +1309,7 @@ struct kvm_x86_ops {
bool (*get_vmfunc_status)(void);
bool (*get_eptp_switching_status)(void);
u16 (*get_ept_view)(struct kvm_vcpu *vcpu);
+ int (*set_ept_view)(struct kvm_vcpu *vcpu, u16 view);
};
struct kvm_x86_nested_ops {
@@ -4373,6 +4373,28 @@ static int vmx_alloc_eptp_list_page(struct vcpu_vmx *vmx)
return 0;
}
+/*
+ * Switch the vCPU to the given EPT view.
+ *
+ * Returns -EINVAL for an out-of-range view, otherwise the result of
+ * reloading the MMU (0 on success). When no EPTP list page has been
+ * allocated there is nothing to switch, so 0 is returned unchanged.
+ */
+static int vmx_set_ept_view(struct kvm_vcpu *vcpu, u16 view)
+{
+	int r = 0;
+
+	if (view >= KVM_MAX_EPT_VIEWS)
+		return -EINVAL;
+
+	if (to_vmx(vcpu)->eptp_list_pg) {
+		to_vmx(vcpu)->view = view;
+
+		/*
+		 * Reload mmu and make sure vmx_load_mmu_pgd() is called so
+		 * that VMCS::EPT_POINTER is updated accordingly
+		 */
+		kvm_mmu_unload(vcpu);
+		r = kvm_mmu_reload(vcpu);
+		/* Propagate the failure instead of silently reporting success */
+		WARN_ON_ONCE(r);
+	}
+
+	return r;
+}
+
#define VMX_XSS_EXIT_BITMAP 0
/*
@@ -4463,9 +4485,15 @@ static void init_vmcs(struct vcpu_vmx *vmx)
if (cpu_has_vmx_encls_vmexit())
vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
- if (vmx->eptp_list_pg)
+ if (vmx->eptp_list_pg) {
+ u64 vm_function_control;
+
vmcs_write64(EPTP_LIST_ADDRESS,
page_to_phys(vmx->eptp_list_pg));
+ vm_function_control = vmcs_read64(VM_FUNCTION_CONTROL);
+ vm_function_control |= VMX_VMFUNC_EPTP_SWITCHING;
+ vmcs_write64(VM_FUNCTION_CONTROL, vm_function_control);
+ }
if (vmx_pt_mode_is_host_guest()) {
memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
@@ -5965,6 +5993,10 @@ static void dump_eptp_list(void)
eptp_list = phys_to_virt(eptp_list_phys);
+ pr_err("VMFunctionControl=%08x VMFunctionControlHigh=%08x\n",
+ vmcs_read32(VM_FUNCTION_CONTROL),
+ vmcs_read32(VM_FUNCTION_CONTROL_HIGH));
+
pr_err("*** EPTP Switching ***\n");
pr_err("EPTP List Address: %p (phys %p)\n",
eptp_list, (void *)eptp_list_phys);
@@ -8251,6 +8283,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.get_vmfunc_status = vmx_get_vmfunc_status,
.get_eptp_switching_status = vmx_get_eptp_switching_status,
.get_ept_view = vmx_get_ept_view,
+ .set_ept_view = vmx_set_ept_view,
};
static __init int hardware_setup(void)