@@ -2260,6 +2260,8 @@ static inline bool ctl_has_irq(CPUX86State *env)
return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}
+hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+                 int *prot);
#if defined(TARGET_X86_64) && \
defined(CONFIG_USER_ONLY) && \
defined(CONFIG_LINUX)
@@ -24,6 +24,8 @@
#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+#define V_VMLOAD_VMSAVE_ENABLED_MASK (1 << 1)
+
#define SVM_INTERRUPT_SHADOW_MASK 1
#define SVM_IOIO_STR_SHIFT 2
@@ -358,7 +358,7 @@ do_check_protect_pse36:
return error_code;
}
-static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
+hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
int *prot)
{
CPUX86State *env = &X86_CPU(cs)->env;
@@ -120,6 +120,25 @@ static inline bool virtual_gif_enabled(CPUX86State *env)
return false;
}
+static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
+{
+    uint64_t lbr_ctl;
+
+    if (likely(env->hflags & HF_GUEST_MASK)) {
+        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
+            cpu_vmexit(env, exit_code, 0, retaddr);
+        }
+
+        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
+                                                control.lbr_ctl));
+        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
+                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
+
+    }
+
+    return false;
+}
+
static inline bool virtual_gif_set(CPUX86State *env)
{
return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
@@ -430,6 +449,7 @@ void helper_vmload(CPUX86State *env, int aflag)
{
CPUState *cs = env_cpu(env);
target_ulong addr;
+    int prot;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());
@@ -439,6 +459,10 @@ void helper_vmload(CPUX86State *env, int aflag)
addr = (uint32_t)env->regs[R_EAX];
}
+    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
+        addr = get_hphys(cs, addr, MMU_DATA_LOAD, &prot);
+    }
+
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
@@ -472,6 +496,7 @@ void helper_vmsave(CPUX86State *env, int aflag)
{
CPUState *cs = env_cpu(env);
target_ulong addr;
+    int prot;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());
@@ -481,6 +506,10 @@ void helper_vmsave(CPUX86State *env, int aflag)
addr = (uint32_t)env->regs[R_EAX];
}
+    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
+        addr = get_hphys(cs, addr, MMU_DATA_STORE, &prot);
+    }
+
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
addr, x86_ldq_phys(cs,
The feature allows the VMSAVE and VMLOAD instructions to execute in
guest mode without causing a VMEXIT. (APM2 15.33.1)

This is currently untested; I sent it out as part of my GSoC project.

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 target/i386/cpu.h                    |  2 ++
 target/i386/svm.h                    |  2 ++
 target/i386/tcg/sysemu/excp_helper.c |  2 +-
 target/i386/tcg/sysemu/svm_helper.c  | 29 ++++++++++++++++++++++++++++
 4 files changed, 34 insertions(+), 1 deletion(-)
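For context, and not as part of the patch itself: the bit this patch checks is
bit 1 of the VMCB field that QEMU's struct vmcb exposes as control.lbr_ctl
(the VIRT_EXT field in the APM; bit 0 is the LBR virtualization enable).
Below is a minimal sketch of how an L1 hypervisor might opt its nested guest
into the feature, reusing the mask added above; the helper name and the vmcb
pointer are made up for illustration:

/*
 * Illustrative only: enable virtualized VMLOAD/VMSAVE for a nested
 * guest.  Bit 1 of the VIRT_EXT field (control.lbr_ctl in QEMU's
 * struct vmcb) is the virtualized VMSAVE/VMLOAD enable; bit 0 is
 * LBR virtualization.
 */
static void enable_virtual_vmload_vmsave(struct vmcb *vmcb)
{
    vmcb->control.lbr_ctl |= V_VMLOAD_VMSAVE_ENABLED_MASK;   /* (1 << 1) */
}

With that bit set, and with nested paging active while the vCPU is in long
mode, a VMLOAD or VMSAVE executed by the nested guest no longer exits; the
address in rAX is instead treated as a guest physical address and remapped
through the nested page tables, which is what the get_hphys() calls in
helper_vmload()/helper_vmsave() above do.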