@@ -19,10 +19,15 @@ u64 tsc_start;
u64 tsc_end;
u64 vmrun_sum, vmexit_sum;
+u64 vmsave_sum, vmload_sum; /* TSC cycles accumulated over all LATENCY_RUNS iterations */
u64 latvmrun_max;
u64 latvmrun_min;
u64 latvmexit_max;
u64 latvmexit_min;
+u64 latvmload_max; /* worst single-VMLOAD latency observed, in TSC cycles */
+u64 latvmload_min; /* best single-VMLOAD latency observed */
+u64 latvmsave_max; /* worst single-VMSAVE latency observed */
+u64 latvmsave_min; /* best single-VMSAVE latency observed */
u64 runs;
static bool npt_supported(void)
@@ -661,6 +666,51 @@ static bool latency_check(struct test *test)
return true;
}
+static void load_save_prepare(struct test *test) /* reset the VMLOAD/VMSAVE latency counters before the run */
+{
+ default_prepare(test);
+ runs = LATENCY_RUNS; /* number of timed vmload/vmsave iterations */
+ latvmload_min = latvmsave_min = -1ULL; /* start min at max u64 so the first sample always wins */
+ latvmload_max = latvmsave_max = 0;
+ vmload_sum = vmsave_sum = 0;
+}
+
+static bool load_save_finished(struct test *test) /* time LATENCY_RUNS VMLOAD/VMSAVE pairs via rdtsc */
+{
+ u64 vmcb_phys = virt_to_phys(test->vmcb); /* VMLOAD/VMSAVE take the VMCB physical address in rax */
+ u64 cycles;
+
+ for ( ; runs != 0; runs--) {
+ tsc_start = rdtsc();
+ asm volatile("vmload\n\t" : : "a"(vmcb_phys) : "memory"); /* timed VMLOAD */
+ cycles = rdtsc() - tsc_start;
+ if (cycles > latvmload_max)
+ latvmload_max = cycles;
+ if (cycles < latvmload_min)
+ latvmload_min = cycles;
+ vmload_sum += cycles;
+
+ tsc_start = rdtsc();
+ asm volatile("vmsave\n\t" : : "a"(vmcb_phys) : "memory"); /* timed VMSAVE */
+ cycles = rdtsc() - tsc_start;
+ if (cycles > latvmsave_max)
+ latvmsave_max = cycles;
+ if (cycles < latvmsave_min)
+ latvmsave_min = cycles;
+ vmsave_sum += cycles;
+ }
+
+ return true; /* one pass over all runs; tell the harness the test is done */
+}
+
+static bool load_save_check(struct test *test) /* report min/max/avg VMLOAD and VMSAVE latency in TSC cycles */
+{
+ /* The latency counters are u64: use %ld, not %d, so the full 64-bit
+ * values are consumed and printed correctly (format/argument mismatch
+ * otherwise). */
+ printf(" Latency VMLOAD: max: %ld min: %ld avg: %ld\n", latvmload_max,
+ latvmload_min, vmload_sum / LATENCY_RUNS);
+ printf(" Latency VMSAVE: max: %ld min: %ld avg: %ld\n", latvmsave_max,
+ latvmsave_min, vmsave_sum / LATENCY_RUNS);
+ return true; /* informational test: always passes */
+}
static struct test tests[] = {
{ "null", default_supported, default_prepare, null_test,
default_finished, null_check },
@@ -695,6 +745,8 @@ static struct test tests[] = {
default_finished, npt_pfwalk_check },
{ "latency_run_exit", default_supported, latency_prepare, latency_test,
latency_finished, latency_check },
+ { "latency_load_save", default_supported, load_save_prepare, null_test,
+ load_save_finished, load_save_check },
};
int main(int ac, char **av)