
[v2,6/6] KVM: selftests: Test Hyper-V extended hypercall exit to userspace

Message ID 20221121234026.3037083-7-vipinsh@google.com (mailing list archive)
State New, archived
Series Add Hyper-v extended hypercall support in KVM

Commit Message

Vipin Sharma Nov. 21, 2022, 11:40 p.m. UTC
Hyper-V extended hypercalls exit to userspace by default. Verify that
userspace receives the call, update the result from userspace, and then
verify in the guest that the correct result is received.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 tools/testing/selftests/kvm/.gitignore        |  1 +
 tools/testing/selftests/kvm/Makefile          |  1 +
 .../selftests/kvm/include/x86_64/processor.h  |  3 +
 .../kvm/x86_64/hyperv_extended_hypercalls.c   | 94 +++++++++++++++++++
 4 files changed, 99 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c

Comments

Vitaly Kuznetsov Nov. 22, 2022, 3:57 p.m. UTC | #1
Vipin Sharma <vipinsh@google.com> writes:

> Hyper-V extended hypercalls exit to userspace by default. Verify that
> userspace receives the call, update the result from userspace, and then
> verify in the guest that the correct result is received.
>
> Signed-off-by: Vipin Sharma <vipinsh@google.com>
> ---
>  tools/testing/selftests/kvm/.gitignore        |  1 +
>  tools/testing/selftests/kvm/Makefile          |  1 +
>  .../selftests/kvm/include/x86_64/processor.h  |  3 +
>  .../kvm/x86_64/hyperv_extended_hypercalls.c   | 94 +++++++++++++++++++
>  4 files changed, 99 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
>
> diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
> index 082855d94c72..b17874697d74 100644
> --- a/tools/testing/selftests/kvm/.gitignore
> +++ b/tools/testing/selftests/kvm/.gitignore
> @@ -24,6 +24,7 @@
>  /x86_64/hyperv_clock
>  /x86_64/hyperv_cpuid
>  /x86_64/hyperv_evmcs
> +/x86_64/hyperv_extended_hypercalls

My personal preference would be to shorten the name to something like
"hyperv_ext_hcalls"; it doesn't seem to be ambiguous. No strong preference
though, feel free to keep the long version.

>  /x86_64/hyperv_features
>  /x86_64/hyperv_ipi
>  /x86_64/hyperv_svm_test
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 2275ba861e0e..a0e12f5d9835 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -87,6 +87,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
>  TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
>  TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
>  TEST_GEN_PROGS_x86_64 += x86_64/hyperv_evmcs
> +TEST_GEN_PROGS_x86_64 += x86_64/hyperv_extended_hypercalls
>  TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
>  TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi
>  TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
> diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
> index 5d310abe6c3f..f167396b887b 100644
> --- a/tools/testing/selftests/kvm/include/x86_64/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
> @@ -168,6 +168,9 @@ struct kvm_x86_cpu_feature {
>  #define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
>  #define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)
>  
> +/* Hyper-V defined paravirt features */
> +#define X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS	KVM_X86_CPU_FEATURE(0x40000003, 0, EBX, 20)
> +

I completely forgot about my other series where I've converted the whole
hyperv_features test to using KVM_X86_CPU_FEATURE():
https://lore.kernel.org/kvm/20221013095849.705943-6-vkuznets@redhat.com/

but your define reminded me of it, thanks! Hope the whole thing will get
queued soon.

As for your change, I think it is better suited for
include/x86_64/hyperv.h instead of include/x86_64/processor.h anyway;
I'm trying to keep all Hyper-V related stuff separate, as Hyper-V CPUID
leaves intersect with KVM's, e.g. 0x40000001.
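
For illustration, a minimal sketch of what that relocation could look like
(hypothetical placement; it just reuses the KVM_X86_CPU_FEATURE() packing
from processor.h, the exact spot in hyperv.h may differ):

	/* tools/testing/selftests/kvm/include/x86_64/hyperv.h */

	/* Hyper-V CPUID 0x40000003.EBX bit 20: extended hypercalls available */
	#define X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS	\
		KVM_X86_CPU_FEATURE(0x40000003, 0, EBX, 20)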

>  /*
>   * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
>   * value/property as opposed to a single-bit feature.  Again, pack the info
> diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
> new file mode 100644
> index 000000000000..13c1b03294a4
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
> @@ -0,0 +1,94 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Test Hyper-V extended hypercall, HV_EXT_CALL_QUERY_CAPABILITIES (0x8001),
> + * exit to userspace and receive result in guest.
> + *
> + * Negative tests are present in hyperv_features.c
> + *
> + * Copyright 2022 Google LLC
> + * Author: Vipin Sharma <vipinsh@google.com>
> + */
> +
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "hyperv.h"
> +
> +/* Any value is fine */
> +#define EXT_CAPABILITIES 0xbull
> +
> +static void guest_code(vm_vaddr_t in_pg_gpa, vm_vaddr_t out_pg_gpa,
> +		       vm_vaddr_t out_pg_gva)
> +{
> +	uint64_t *output_gva;
> +
> +	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
> +	wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa);
> +
> +	output_gva = (uint64_t *)out_pg_gva;
> +
> +	hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa);
> +
> +	/* TLFS states output will be a uint64_t value */
> +	GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES);
> +
> +	GUEST_DONE();
> +}
> +
> +int main(void)
> +{
> +	vm_vaddr_t hcall_out_page;
> +	vm_vaddr_t hcall_in_page;
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_run *run;
> +	struct kvm_vm *vm;
> +	uint64_t *outval;
> +	struct ucall uc;
> +
> +	/* Verify if extended hypercalls are supported */
> +	if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(),
> +			   X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS)) {
> +		print_skip("Extended calls not supported by the kernel");
> +		exit(KSFT_SKIP);
> +	}
> +
> +	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
> +	run = vcpu->run;
> +	vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

Do we need this enforcement assuming we have no plans to add 'negative'
tests here (hyperv_features does it just fine)? vcpu_set_hv_cpuid()
enables everything anyway...
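
(i.e., a sketch of what the vCPU setup would reduce to if the cap call is
dropped, nothing else changes:)

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;
	vcpu_set_hv_cpuid(vcpu);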

> +	vcpu_set_hv_cpuid(vcpu);
> +
> +	/* Hypercall input */
> +	hcall_in_page = vm_vaddr_alloc_pages(vm, 1);
> +	memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);
> +
> +	/* Hypercall output */
> +	hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
> +	memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);
> +
> +	vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
> +		      addr_gva2gpa(vm, hcall_out_page), hcall_out_page);
> +
> +	vcpu_run(vcpu);
> +
> +	ASSERT_EXIT_REASON(vcpu, KVM_EXIT_HYPERV);
> +
> +	outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);
> +	*outval = EXT_CAPABILITIES;
> +	run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
> +
> +	vcpu_run(vcpu);
> +
> +	ASSERT_EXIT_REASON(vcpu, KVM_EXIT_IO);
> +
> +	switch (get_ucall(vcpu, &uc)) {
> +	case UCALL_ABORT:
> +		REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld");
> +		break;
> +	case UCALL_DONE:
> +		break;
> +	default:
> +		TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
> +	}
> +
> +	kvm_vm_free(vm);
> +	return 0;
> +}
Vipin Sharma Nov. 23, 2022, 7:33 p.m. UTC | #2
On Tue, Nov 22, 2022 at 7:57 AM Vitaly Kuznetsov <vkuznets@redhat.com> wrote:
>
> Vipin Sharma <vipinsh@google.com> writes:
>
> > index 082855d94c72..b17874697d74 100644
> > --- a/tools/testing/selftests/kvm/.gitignore
> > +++ b/tools/testing/selftests/kvm/.gitignore
> > @@ -24,6 +24,7 @@
> >  /x86_64/hyperv_clock
> >  /x86_64/hyperv_cpuid
> >  /x86_64/hyperv_evmcs
> > +/x86_64/hyperv_extended_hypercalls
>
> My personal preference would be to shorten the name to something like
> "hyperv_ext_hcalls"; it doesn't seem to be ambiguous. No strong preference
> though, feel free to keep the long version.
>

I will keep the long one; in v1, David suggested it would be easier
for non-Hyper-V developers to read and understand.

> > +/* Hyper-V defined paravirt features */
> > +#define X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS       KVM_X86_CPU_FEATURE(0x40000003, 0, EBX, 20)
> > +
>
> I completely forgot about my other series where I've converted the whole
> hyperv_features test to using KVM_X86_CPU_FEATURE():
> https://lore.kernel.org/kvm/20221013095849.705943-6-vkuznets@redhat.com/
>
> but your define reminded me of it, thanks! Hope the whole thing will get
> queued soon.
>

Your patches are always one step ahead of me :D

If your series doesn't show up in the KVM queue soon, I will rebase my
patch series on top of it.

> As for your change, I think it is better suited for
> include/x86_64/hyperv.h instead of include/x86_64/processor.h anyway,
> I'm trying to keep all Hyper-V related stuff separate as Hyper-V CPUID
> leaves intersect with KVM's, e.g. 0x40000001.
>

Sounds good.

> >  /*
> >   * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
> >   * value/property as opposed to a single-bit feature.  Again, pack the info
> > diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
> > new file mode 100644
> > index 000000000000..13c1b03294a4
> > --- /dev/null
> > +++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
> > @@ -0,0 +1,94 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +/*
> > + * Test Hyper-V extended hypercall, HV_EXT_CALL_QUERY_CAPABILITIES (0x8001),
> > + * exit to userspace and receive result in guest.
> > + *
> > + * Negative tests are present in hyperv_features.c
> > + *
> > + * Copyright 2022 Google LLC
> > + * Author: Vipin Sharma <vipinsh@google.com>
> > + */
> > +
> > +#include "kvm_util.h"
> > +#include "processor.h"
> > +#include "hyperv.h"
> > +
> > +/* Any value is fine */
> > +#define EXT_CAPABILITIES 0xbull
> > +
> > +static void guest_code(vm_vaddr_t in_pg_gpa, vm_vaddr_t out_pg_gpa,
> > +                    vm_vaddr_t out_pg_gva)
> > +{
> > +     uint64_t *output_gva;
> > +
> > +     wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
> > +     wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa);
> > +
> > +     output_gva = (uint64_t *)out_pg_gva;
> > +
> > +     hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa);
> > +
> > +     /* TLFS states output will be a uint64_t value */
> > +     GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES);
> > +
> > +     GUEST_DONE();
> > +}
> > +
> > +int main(void)
> > +{
> > +     vm_vaddr_t hcall_out_page;
> > +     vm_vaddr_t hcall_in_page;
> > +     struct kvm_vcpu *vcpu;
> > +     struct kvm_run *run;
> > +     struct kvm_vm *vm;
> > +     uint64_t *outval;
> > +     struct ucall uc;
> > +
> > +     /* Verify if extended hypercalls are supported */
> > +     if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(),
> > +                        X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS)) {
> > +             print_skip("Extended calls not supported by the kernel");
> > +             exit(KSFT_SKIP);
> > +     }
> > +
> > +     vm = vm_create_with_one_vcpu(&vcpu, guest_code);
> > +     run = vcpu->run;
> > +     vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
>
> Do we need this enforcement assuming we have no plans to add 'negative'
> tests here (hyperv_features does it just fine)? vcpu_set_hv_cpuid()
> enables everything anyway...
>

We do not. I will remove it.

> > +     vcpu_set_hv_cpuid(vcpu);
> > +
> > +     /* Hypercall input */
> > +     hcall_in_page = vm_vaddr_alloc_pages(vm, 1);
> > +     memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);
> > +
> > +     /* Hypercall output */
> > +     hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
> > +     memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);
> > +
> > +     vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
> > +                   addr_gva2gpa(vm, hcall_out_page), hcall_out_page);
> > +
> > +     vcpu_run(vcpu);
> > +
> > +     ASSERT_EXIT_REASON(vcpu, KVM_EXIT_HYPERV);
> > +
> > +     outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);
> > +     *outval = EXT_CAPABILITIES;
> > +     run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
> > +
> > +     vcpu_run(vcpu);
> > +
> > +     ASSERT_EXIT_REASON(vcpu, KVM_EXIT_IO);
> > +
> > +     switch (get_ucall(vcpu, &uc)) {
> > +     case UCALL_ABORT:
> > +             REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld");
> > +             break;
> > +     case UCALL_DONE:
> > +             break;
> > +     default:
> > +             TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
> > +     }
> > +
> > +     kvm_vm_free(vm);
> > +     return 0;
> > +}
>
> --
> Vitaly
>

Patch

diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 082855d94c72..b17874697d74 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -24,6 +24,7 @@ 
 /x86_64/hyperv_clock
 /x86_64/hyperv_cpuid
 /x86_64/hyperv_evmcs
+/x86_64/hyperv_extended_hypercalls
 /x86_64/hyperv_features
 /x86_64/hyperv_ipi
 /x86_64/hyperv_svm_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 2275ba861e0e..a0e12f5d9835 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -87,6 +87,7 @@  TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_evmcs
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_extended_hypercalls
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 5d310abe6c3f..f167396b887b 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -168,6 +168,9 @@  struct kvm_x86_cpu_feature {
 #define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
 #define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)
 
+/* Hyper-V defined paravirt features */
+#define X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS	KVM_X86_CPU_FEATURE(0x40000003, 0, EBX, 20)
+
 /*
  * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
  * value/property as opposed to a single-bit feature.  Again, pack the info
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
new file mode 100644
index 000000000000..13c1b03294a4
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
@@ -0,0 +1,94 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test Hyper-V extended hypercall, HV_EXT_CALL_QUERY_CAPABILITIES (0x8001),
+ * exit to userspace and receive result in guest.
+ *
+ * Negative tests are present in hyperv_features.c
+ *
+ * Copyright 2022 Google LLC
+ * Author: Vipin Sharma <vipinsh@google.com>
+ */
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "hyperv.h"
+
+/* Any value is fine */
+#define EXT_CAPABILITIES 0xbull
+
+static void guest_code(vm_vaddr_t in_pg_gpa, vm_vaddr_t out_pg_gpa,
+		       vm_vaddr_t out_pg_gva)
+{
+	uint64_t *output_gva;
+
+	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+	wrmsr(HV_X64_MSR_HYPERCALL, in_pg_gpa);
+
+	output_gva = (uint64_t *)out_pg_gva;
+
+	hyperv_hypercall(HV_EXT_CALL_QUERY_CAPABILITIES, in_pg_gpa, out_pg_gpa);
+
+	/* TLFS states output will be a uint64_t value */
+	GUEST_ASSERT_EQ(*output_gva, EXT_CAPABILITIES);
+
+	GUEST_DONE();
+}
+
+int main(void)
+{
+	vm_vaddr_t hcall_out_page;
+	vm_vaddr_t hcall_in_page;
+	struct kvm_vcpu *vcpu;
+	struct kvm_run *run;
+	struct kvm_vm *vm;
+	uint64_t *outval;
+	struct ucall uc;
+
+	/* Verify if extended hypercalls are supported */
+	if (!kvm_cpuid_has(kvm_get_supported_hv_cpuid(),
+			   X86_FEATURE_HYPERV_EXTENDED_HYPERCALLS)) {
+		print_skip("Extended calls not supported by the kernel");
+		exit(KSFT_SKIP);
+	}
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+	run = vcpu->run;
+	vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
+	vcpu_set_hv_cpuid(vcpu);
+
+	/* Hypercall input */
+	hcall_in_page = vm_vaddr_alloc_pages(vm, 1);
+	memset(addr_gva2hva(vm, hcall_in_page), 0x0, vm->page_size);
+
+	/* Hypercall output */
+	hcall_out_page = vm_vaddr_alloc_pages(vm, 1);
+	memset(addr_gva2hva(vm, hcall_out_page), 0x0, vm->page_size);
+
+	vcpu_args_set(vcpu, 3, addr_gva2gpa(vm, hcall_in_page),
+		      addr_gva2gpa(vm, hcall_out_page), hcall_out_page);
+
+	vcpu_run(vcpu);
+
+	ASSERT_EXIT_REASON(vcpu, KVM_EXIT_HYPERV);
+
+	outval = addr_gpa2hva(vm, run->hyperv.u.hcall.params[1]);
+	*outval = EXT_CAPABILITIES;
+	run->hyperv.u.hcall.result = HV_STATUS_SUCCESS;
+
+	vcpu_run(vcpu);
+
+	ASSERT_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+	switch (get_ucall(vcpu, &uc)) {
+	case UCALL_ABORT:
+		REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld");
+		break;
+	case UCALL_DONE:
+		break;
+	default:
+		TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
+	}
+
+	kvm_vm_free(vm);
+	return 0;
+}