Message ID | 20220414132013.1588929-34-vkuznets@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: x86: hyper-v: Fine-grained TLB flush + L2 TLB flush feature |
On Thu, 2022-04-14 at 15:20 +0200, Vitaly Kuznetsov wrote:
> Enable Hyper-V L2 TLB flush and check that Hyper-V TLB flush hypercalls
> from L2 don't exit to L1 unless 'TlbLockCount' is set in the Partition
> assist page.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---
>  .../selftests/kvm/x86_64/hyperv_svm_test.c   | 60 +++++++++++++++++--
>  1 file changed, 56 insertions(+), 4 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
> index 21f5ca9197da..99f0a2ead7df 100644
> --- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
> @@ -42,11 +42,24 @@ struct hv_enlightenments {
>   */
>  #define VMCB_HV_NESTED_ENLIGHTENMENTS (1U << 31)
>  
> +#define HV_SVM_EXITCODE_ENL 0xF0000000
> +#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
> +
>  static inline void vmmcall(void)
>  {
>  	__asm__ __volatile__("vmmcall");
>  }
>  
> +static inline void hypercall(u64 control, vm_vaddr_t arg1, vm_vaddr_t arg2)
> +{
> +	asm volatile("mov %3, %%r8\n"
> +		     "vmmcall"
> +		     : "+c" (control), "+d" (arg1)
> +		     : "r" (arg2)
> +		     : "cc", "memory", "rax", "rbx", "r8", "r9", "r10",
> +		       "r11", "r12", "r13", "r14", "r15");
> +}

Yes, this code should really be put in a common file :)

> +
>  void l2_guest_code(void)
>  {
>  	GUEST_SYNC(3);
> @@ -62,11 +75,21 @@ void l2_guest_code(void)
>  
>  	GUEST_SYNC(5);
>  
> +	/* L2 TLB flush tests */
> +	hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
> +		  HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
> +	rdmsr(MSR_FS_BASE);
> +	hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
> +		  HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
> +	/* Make sure we're not issuing Hyper-V TLB flush call again */
> +	__asm__ __volatile__ ("mov $0xdeadbeef, %rcx");
> +
>  	/* Done, exit to L1 and never come back. */
>  	vmmcall();
>  }
>  
> -static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
> +static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm,
> +						    vm_vaddr_t pgs_gpa)
>  {
>  	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
>  	struct vmcb *vmcb = svm->vmcb;
> @@ -75,13 +98,23 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
>  
>  	GUEST_SYNC(1);
>  
> -	wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
> +	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
> +	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
> +	enable_vp_assist(svm->vp_assist_gpa, svm->vp_assist);
>  
>  	GUEST_ASSERT(svm->vmcb_gpa);
>  	/* Prepare for L2 execution. */
>  	generic_svm_setup(svm, l2_guest_code,
>  			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
>  
> +	/* L2 TLB flush setup */
> +	hve->partition_assist_page = svm->partition_assist_gpa;
> +	hve->hv_enlightenments_control.nested_flush_hypercall = 1;
> +	hve->hv_vm_id = 1;
> +	hve->hv_vp_id = 1;
> +	current_vp_assist->nested_control.features.directhypercall = 1;
> +	*(u32 *)(svm->partition_assist) = 0;
> +
>  	GUEST_SYNC(2);
>  	run_guest(vmcb, svm->vmcb_gpa);
>  	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
> @@ -116,6 +149,20 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
>  	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
>  	vmcb->save.rip += 2; /* rdmsr */
>  
> +
> +	/*
> +	 * L2 TLB flush test. First VMCALL should be handled directly by L0,
> +	 * no VMCALL exit expected.
> +	 */
> +	run_guest(vmcb, svm->vmcb_gpa);
> +	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR);
> +	vmcb->save.rip += 2; /* rdmsr */
> +	/* Enable synthetic vmexit */
> +	*(u32 *)(svm->partition_assist) = 1;
> +	run_guest(vmcb, svm->vmcb_gpa);
> +	GUEST_ASSERT(vmcb->control.exit_code == HV_SVM_EXITCODE_ENL);
> +	GUEST_ASSERT(vmcb->control.exit_info_1 == HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH);
> +
>  	run_guest(vmcb, svm->vmcb_gpa);
>  	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
>  	GUEST_SYNC(6);
> @@ -126,7 +173,7 @@ static void __attribute__((__flatten__)) guest_code(struct svm_test_data *svm)
>  int main(int argc, char *argv[])
>  {
>  	vm_vaddr_t nested_gva = 0;
> -
> +	vm_vaddr_t hcall_page;
>  	struct kvm_vm *vm;
>  	struct kvm_run *run;
>  	struct ucall uc;
> @@ -141,7 +188,12 @@ int main(int argc, char *argv[])
>  	vcpu_set_hv_cpuid(vm, VCPU_ID);
>  	run = vcpu_state(vm, VCPU_ID);
>  	vcpu_alloc_svm(vm, &nested_gva);
> -	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
> +
> +	hcall_page = vm_vaddr_alloc_pages(vm, 1);
> +	memset(addr_gva2hva(vm, hcall_page), 0x0, getpagesize());
> +
> +	vcpu_args_set(vm, VCPU_ID, 2, nested_gva, addr_gva2gpa(vm, hcall_page));
> +	vcpu_set_msr(vm, VCPU_ID, HV_X64_MSR_VP_INDEX, VCPU_ID);
>  
>  	for (stage = 1;; stage++) {
>  		_vcpu_run(vm, VCPU_ID);

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
	Maxim Levitsky
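For context on the deduplication Maxim suggests: the hypercall() helper implements the Hyper-V fast-hypercall calling convention (control word in RCX, first input in RDX, second input in R8), and near-identical copies exist in the other Hyper-V selftests. Below is a minimal sketch of what a shared version could look like; the hyperv_hypercall() name and the this_cpu_has() feature check are illustrative assumptions, not necessarily the interface that was eventually merged.

	/*
	 * Sketch of a shared fast-hypercall helper for the Hyper-V selftests.
	 * Fast hypercall ABI per the Hyper-V TLFS: control word in RCX, first
	 * input in RDX, second input in R8. The hypervisor is entered with
	 * VMCALL on Intel and VMMCALL on AMD; all other GPRs are listed as
	 * clobbers so the compiler caches nothing across the transition.
	 */
	static inline void hyperv_hypercall(u64 control, u64 input1, u64 input2)
	{
		if (this_cpu_has(X86_FEATURE_SVM))	/* assumed helper */
			asm volatile("mov %3, %%r8\n"
				     "vmmcall"
				     : "+c" (control), "+d" (input1)
				     : "r" (input2)
				     : "cc", "memory", "rax", "rbx", "r8", "r9", "r10",
				       "r11", "r12", "r13", "r14", "r15");
		else
			asm volatile("mov %3, %%r8\n"
				     "vmcall"
				     : "+c" (control), "+d" (input1)
				     : "r" (input2)
				     : "cc", "memory", "rax", "rbx", "r8", "r9", "r10",
				       "r11", "r12", "r13", "r14", "r15");
	}

Selecting VMMCALL vs. VMCALL at run time is the only SVM/VMX difference in the two copies, which is what makes a single shared helper practical.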
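A note on the 'TlbLockCount' field from the commit message: it is the first 32-bit word of the partition assist page, which is why the test arms and disarms the synthetic vmexit through a raw *(u32 *) cast. The same logic with an explicit structure, as a sketch: the struct mirrors hv_partition_assist_pg from the kernel's Hyper-V TLFS definitions, while the helper name is illustrative only.

	/* First field of the partition assist page, per the Hyper-V TLFS. */
	struct hv_partition_assist_pg {
		u32 tlb_lock_count;
	};

	/* Illustrative helper; the test pokes the field with a raw cast instead. */
	static inline void hv_set_tlb_lock_count(void *partition_assist, u32 count)
	{
		struct hv_partition_assist_pg *pa = partition_assist;

		/*
		 * 0: L0 handles L2 TLB flush hypercalls itself, L1 never sees them.
		 * Non-zero: L0 still performs the flush, then delivers the synthetic
		 * HV_SVM_EXITCODE_ENL / TRAP_AFTER_FLUSH exit so L1 regains control.
		 */
		pa->tlb_lock_count = count;
	}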