
[v1,5/5] selftests: KVM: SVM: Add Idle HLT intercept test

Message ID 20240307054623.13632-6-manali.shukla@amd.com (mailing list archive)
State New
Series Add support for the Idle HLT intercept feature

Commit Message

Manali Shukla March 7, 2024, 5:46 a.m. UTC
From: Manali Shukla <Manali.Shukla@amd.com>

Execution of the HLT instruction results in a VMEXIT. The hypervisor
observes pending V_INTR and V_NMI events just after the VMEXIT
generated by HLT for the vCPU and causes a VM entry to service the
pending events.  The Idle HLT intercept feature avoids the wasteful
VMEXIT during the halt if there are pending V_INTR or V_NMI events
for the vCPU.

The selftest for the Idle HLT intercept exercises the above-mentioned
scenario.

Signed-off-by: Manali Shukla <Manali.Shukla@amd.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../selftests/kvm/x86_64/svm_idlehlt_test.c   | 118 ++++++++++++++++++
 2 files changed, 119 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c

Comments

Sean Christopherson March 7, 2024, 6:22 p.m. UTC | #1
On Thu, Mar 07, 2024, Manali Shukla wrote:
> From: Manali Shukla <Manali.Shukla@amd.com>
> 
> The Execution of the HLT instruction results in a VMEXIT. Hypervisor
> observes pending V_INTR and V_NMI events just after the VMEXIT
> generated by the HLT for the vCPU and causes VM entry to service the
> pending events.  The Idle HLT intercept feature avoids the wasteful
> VMEXIT during the halt if there are pending V_INTR and V_NMI events
> for the vCPU.
> 
> The selftest for the Idle HLT intercept instruments the above-mentioned
> scenario.
> 
> Signed-off-by: Manali Shukla <Manali.Shukla@amd.com>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../selftests/kvm/x86_64/svm_idlehlt_test.c   | 118 ++++++++++++++++++
>  2 files changed, 119 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
> 
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 492e937fab00..7ad03dc4f35e 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -89,6 +89,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
>  TEST_GEN_PROGS_x86_64 += x86_64/smm_test
>  TEST_GEN_PROGS_x86_64 += x86_64/state_test
>  TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
> +TEST_GEN_PROGS_x86_64 += x86_64/svm_idlehlt_test

Uber nit, maybe svm_idle_hlt_test?  I find "idlehlt" hard to parse.

>  TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
>  TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
>  TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
> diff --git a/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c b/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
> new file mode 100644
> index 000000000000..1564511799d4
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
> @@ -0,0 +1,118 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + *  svm_idlehalt_test
> + *

Please omit this, file comments that state the name of the test inevitably
become stale (see above).

> + *  Copyright (C) 2024 Advanced Micro Devices, Inc.
> + *
> + *  For licencing details see kernel-base/COPYING

This seems gratuitous, doesn't the SPDX stuff take care of this?

> + *
> + *  Author:
> + *  Manali Shukla  <manali.shukla@amd.com>
> + */
> +#include "kvm_util.h"
> +#include "svm_util.h"
> +#include "processor.h"
> +#include "test_util.h"
> +#include "apic.h"
> +
> +#define VINTR_VECTOR     0x30
> +#define NUM_ITERATIONS 100000

What's the runtime?  If it's less than a second, then whatever, but if it's at
all longer than that, then I'd prefer to use a lower default and make this user-
configurable.

> +/*
> + * Incremented in the VINTR handler. Provides evidence to the sender that the
> + * VINR is arrived at the destination.

Evidence is useless if there's no detective looking for it.  Yeah, it gets
printed out in the end, but in reality, no one is going to look at that.

Rather than read this from the host, just make it a non-volatile bool and assert
that it is set after every halt.

> + */
> +static volatile uint64_t vintr_rcvd;
> +
> +void verify_apic_base_addr(void)
> +{
> +	uint64_t msr = rdmsr(MSR_IA32_APICBASE);
> +	uint64_t base = GET_APIC_BASE(msr);
> +
> +	GUEST_ASSERT(base == APIC_DEFAULT_GPA);
> +}
> +
> +/*
> + * The halting guest code instruments the scenario where there is a V_INTR pending
> + * event available while hlt instruction is executed. The HLT VM Exit doesn't
> + * occur in above-mentioned scenario if the Idle HLT intercept feature is enabled.
> + */
> +
> +static void halter_guest_code(void)

Just "guest_code()".  Yeah, it's a weird generic name, but at this point it's so
ubiquitous that it's analogous to main(), i.e. readers know that guest_code() is
the entry point.  And deviating from that suggests that there is a second vCPU
running _other_ guest code (otherwise, why differentiate?), which isn't the case.

> +{
> +	uint32_t icr_val;
> +	int i;
> +
> +	verify_apic_base_addr();

Why?  The test will fail if the APIC is borked, this is just unnecessary noise
that distracts readers.


> +	xapic_enable();
> +
> +	icr_val = (APIC_DEST_SELF | APIC_INT_ASSERT | VINTR_VECTOR);
> +
> +	for (i = 0; i < NUM_ITERATIONS; i++) {
> +		xapic_write_reg(APIC_ICR, icr_val);
> +		asm volatile("sti; hlt; cli");

Please add safe_halt() and cli() helpers in processor.h.  And then do:

		cli();
		xapic_write_reg(APIC_ICR, icr_val);
		safe_halt();

to guarantee that interrupts are disabled when the IPI is sent.  And as above,

		GUEST_ASSERT(READ_ONCE(irq_received));
		WRITE_ONCE(irq_received, false);

> +	}
> +	GUEST_DONE();
> +}
> +
> +static void guest_vintr_handler(struct ex_regs *regs)
> +{
> +	vintr_rcvd++;
> +	xapic_write_reg(APIC_EOI, 0x30);

EOI is typically written with '0', not the vector, because the legacy EOI register
clears the highest ISR vector, not whatever is specified.  IIRC, one of the Intel
or AMD specs even says to use '0'.

AMD's Specific EOI register does allow targeting a specific vector, but that's
not what's being used here.
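
Taken together with the irq_received flag suggested above, the handler in a respin
might look something like this (a sketch under those assumptions, not the posted
code):

static bool irq_received;

static void guest_vintr_handler(struct ex_regs *regs)
{
	/* Record delivery for the sender, then ack via the legacy EOI register. */
	WRITE_ONCE(irq_received, true);
	xapic_write_reg(APIC_EOI, 0);
}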

> +}
> +
> +int main(int argc, char *argv[])
> +{
> +	struct kvm_vm *vm;
> +	struct kvm_vcpu *vcpu;
> +	struct ucall uc;
> +	uint64_t  halt_exits, vintr_exits;
> +	uint64_t *pvintr_rcvd;
> +
> +	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

No, this test doesn't require SVM, which is KVM advertising *nested* SVM.  This
test does require idle-hlt support though...
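
Assuming an earlier patch in this series defines a matching selftests CPUID feature
(the name below is hypothetical), the requirement would look something like:

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_IDLE_HLT));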

> +	/* Check the extension for binary stats */
> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
> +
> +	vm = vm_create_with_one_vcpu(&vcpu, halter_guest_code);
> +
> +	vm_init_descriptor_tables(vm);
> +	vcpu_init_descriptor_tables(vcpu);
> +	vm_install_exception_handler(vm, VINTR_VECTOR, guest_vintr_handler);
> +	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
> +
> +	vcpu_run(vcpu);
> +	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
> +
> +	halt_exits = vcpu_get_stat(vcpu, "halt_exits");

Is there really no way to get binary stats without having to pass in strings?

> +	vintr_exits = vcpu_get_stat(vcpu, "irq_window_exits");
> +	pvintr_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&vintr_rcvd);
> +
> +	switch (get_ucall(vcpu, &uc)) {
> +	case UCALL_ABORT:
> +		REPORT_GUEST_ASSERT(uc);
> +		/* NOT REACHED */

Eh, just put a "break;" here and drop the comment.

> +	case UCALL_DONE:
> +		goto done;

break;

> +	default:
> +		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
> +	}
> +
> +done:
> +	TEST_ASSERT(halt_exits == 0,

So in all honesty, this isn't a very interesting test.  It's more of a CPU test
than a KVM test.  I do think it's worth adding, because why not.

But I'd also like to see a testcase for KVM_X86_DISABLE_EXITS_HLT.  It would be
a generic test, i.e. not specific to idle-hlt since there is no dependency and
the test shouldn't care.  I _think_ it would be fairly straightforward: create
a VM without an in-kernel APIC (so that HLT exits to userspace), disable HLT
interception, send a signal from a different task after some delay, and execute
HLT in the guest.  Then verify the vCPU exited because of -EINTR and not HLT.
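
A very rough skeleton of that flow, purely for illustration (the VM setup, in
particular avoiding the in-kernel APIC, and the exact helper behavior would need to
be double-checked against the selftests library; nothing below is posted code):

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

static void guest_code(void)
{
	/* With HLT interception disabled, HLT should not exit to KVM. */
	asm volatile("sti; hlt");
	GUEST_DONE();
}

static void sigusr1_handler(int sig)
{
	/* Empty on purpose; the signal only needs to interrupt KVM_RUN. */
}

static void *signal_sender(void *arg)
{
	pthread_t *vcpu_thread = arg;

	usleep(100 * 1000);
	pthread_kill(*vcpu_thread, SIGUSR1);
	return NULL;
}

int main(int argc, char *argv[])
{
	pthread_t self = pthread_self(), sender;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int r;

	signal(SIGUSR1, sigusr1_handler);

	/* HLT exits must be disabled before any vCPU is created. */
	vm = vm_create(1);
	vm_enable_cap(vm, KVM_CAP_X86_DISABLE_EXITS, KVM_X86_DISABLE_EXITS_HLT);
	vcpu = vm_vcpu_add(vm, 0, guest_code);

	pthread_create(&sender, NULL, signal_sender, &self);

	/* Use the raw run variant so an EINTR return is visible to the test. */
	r = __vcpu_run(vcpu);
	TEST_ASSERT(r == -1 && errno == EINTR,
		    "Expected KVM_RUN to be interrupted, got r = %d, errno = %d",
		    r, errno);

	pthread_join(sender, NULL);
	kvm_vm_free(vm);
	return 0;
}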

> +		    "Test Failed:\n"
> +		    "Guest executed VINTR followed by halts: %d times\n"
> +		    "The guest exited due to halt: %ld times and number\n"
> +		    "of vintr exits: %ld and vintr got re-injected: %ld times\n",
> +		    NUM_ITERATIONS, halt_exits, vintr_exits, *pvintr_rcvd);

I appreciate the effort to provide more info, but this is way too noisy.  If
anything, print gory details in a pr_debug() *before* the assert (see below),
and then simply do:

	TEST_ASSERT_EQ(halt_exits, 0);
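
Concretely, that might end up looking something like this (the debug wording is
hypothetical):

	pr_debug("Guest sent %d self-IPIs; halt_exits = %lu, irq_window_exits = %lu\n",
		 NUM_ITERATIONS, halt_exits, vintr_exits);
	TEST_ASSERT_EQ(halt_exits, 0);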

> +	fprintf(stderr,
> +		"Test Successful:\n"
> +		"Guest executed VINTR followed by halts: %d times\n"
> +		"The guest exited due to halt: %ld times and number\n"
> +		"of vintr exits: %ld and vintr got re-injected: %ld times\n",
> +		NUM_ITERATIONS, halt_exits, vintr_exits, *pvintr_rcvd);

And this should be pr_debug(), because no human is going to look at this except
when the test isn't working correctly.

> +
> +	kvm_vm_free(vm);
> +	return 0;
> +}
> -- 
> 2.34.1
Sean Christopherson March 7, 2024, 6:24 p.m. UTC | #2
On Thu, Mar 07, 2024, Sean Christopherson wrote:
> On Thu, Mar 07, 2024, Manali Shukla wrote:
> > From: Manali Shukla <Manali.Shukla@amd.com>
> > +	xapic_enable();
> > +
> > +	icr_val = (APIC_DEST_SELF | APIC_INT_ASSERT | VINTR_VECTOR);
> > +
> > +	for (i = 0; i < NUM_ITERATIONS; i++) {
> > +		xapic_write_reg(APIC_ICR, icr_val);
> > +		asm volatile("sti; hlt; cli");
> 
> Please add safe_halt() and cli() helpers in processor.h.  And then do:

Doh, saw something shiny and forgot to finish my thought.  For safe_halt(), copy
the thing verbatim from KVM-Unit-Tests, as not everyone is familiar with the
sti=>hlt trick.

/*
 * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
 * intended to be a wake event arrives *after* HLT is executed.  Modern CPUs,
 * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
 * instruction after STI, *if* RFLAGS.IF=0 before STI.  Note, Intel CPUs may
 * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
 */
static inline void safe_halt(void)
{
	asm volatile("sti; hlt");
}
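
For completeness, the cli()/sti() counterparts and the reworked guest loop might
look roughly like the below, assuming the helpers land in processor.h (this is a
sketch of the suggestions in this thread, not code from any posted version):

static inline void cli(void)
{
	asm volatile("cli");
}

static inline void sti(void)
{
	asm volatile("sti");
}

And in the test itself:

/* Set to true by the VINTR handler. */
static bool irq_received;

static void guest_code(void)
{
	uint32_t icr_val = APIC_DEST_SELF | APIC_INT_ASSERT | VINTR_VECTOR;
	int i;

	xapic_enable();

	for (i = 0; i < NUM_ITERATIONS; i++) {
		/* Block IRQs while the self-IPI is sent... */
		cli();
		xapic_write_reg(APIC_ICR, icr_val);
		/* ...so it can only be taken after HLT has executed. */
		safe_halt();
		GUEST_ASSERT(READ_ONCE(irq_received));
		WRITE_ONCE(irq_received, false);
	}
	GUEST_DONE();
}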
Manali Shukla March 14, 2024, 5:35 a.m. UTC | #3
Hi Sean,

Thank you for reviewing my patches.

On 3/7/2024 11:52 PM, Sean Christopherson wrote:
> On Thu, Mar 07, 2024, Manali Shukla wrote:
>> From: Manali Shukla <Manali.Shukla@amd.com>
>>
>> The Execution of the HLT instruction results in a VMEXIT. Hypervisor
>> observes pending V_INTR and V_NMI events just after the VMEXIT
>> generated by the HLT for the vCPU and causes VM entry to service the
>> pending events.  The Idle HLT intercept feature avoids the wasteful
>> VMEXIT during the halt if there are pending V_INTR and V_NMI events
>> for the vCPU.
>>
>> The selftest for the Idle HLT intercept instruments the above-mentioned
>> scenario.
>>
>> Signed-off-by: Manali Shukla <Manali.Shukla@amd.com>
>> ---
>>  tools/testing/selftests/kvm/Makefile          |   1 +
>>  .../selftests/kvm/x86_64/svm_idlehlt_test.c   | 118 ++++++++++++++++++
>>  2 files changed, 119 insertions(+)
>>  create mode 100644 tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
>>
>> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
>> index 492e937fab00..7ad03dc4f35e 100644
>> --- a/tools/testing/selftests/kvm/Makefile
>> +++ b/tools/testing/selftests/kvm/Makefile
>> @@ -89,6 +89,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/smm_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/state_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
>> +TEST_GEN_PROGS_x86_64 += x86_64/svm_idlehlt_test
> 
> Uber nit, maybe svm_idle_hlt_test?  I find "idlehlt" hard to parse.
Sure I will change it to svm_idle_hlt_test.

> 
>>  TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
>>  TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
>> diff --git a/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c b/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
>> new file mode 100644
>> index 000000000000..1564511799d4
>> --- /dev/null
>> +++ b/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
>> @@ -0,0 +1,118 @@
>> +// SPDX-License-Identifier: GPL-2.0-only
>> +/*
>> + *  svm_idlehalt_test
>> + *
> 
> Please omit this, file comments that state the name of the test inevitably
> become stale (see above).

Sure. I will remove it.
> 
>> + *  Copyright (C) 2024 Advanced Micro Devices, Inc.
>> + *
>> + *  For licencing details see kernel-base/COPYING
> 
> This seems gratuitous, doesn't the SPDX stuff take care this?

Agreed, I will remove this.

> 
>> + *
>> + *  Author:
>> + *  Manali Shukla  <manali.shukla@amd.com>
>> + */
>> +#include "kvm_util.h"
>> +#include "svm_util.h"
>> +#include "processor.h"
>> +#include "test_util.h"
>> +#include "apic.h"
>> +
>> +#define VINTR_VECTOR     0x30
>> +#define NUM_ITERATIONS 100000
> 
> What's the runtime?  If it's less than a second, then whatever, but if it's at
> all longer than that, then I'd prefer to use a lower default and make this user-
> configurable.

It takes ~34s to run this test. 
> 
>> +/*
>> + * Incremented in the VINTR handler. Provides evidence to the sender that the
>> + * VINR is arrived at the destination.
> 
> Evidence is useless if there's no detective looking for it.  Yeah, it gets
> printed out in the end, but in reality, no one is going to look at that.
> 
> Rather than read this from the host, just make it a non-volatile bool and assert
> that it set after every 
> 

Sure. I can do that.

>> + */
>> +static volatile uint64_t vintr_rcvd;
>> +
>> +void verify_apic_base_addr(void)
>> +{
>> +	uint64_t msr = rdmsr(MSR_IA32_APICBASE);
>> +	uint64_t base = GET_APIC_BASE(msr);
>> +
>> +	GUEST_ASSERT(base == APIC_DEFAULT_GPA);
>> +}
>> +
>> +/*
>> + * The halting guest code instruments the scenario where there is a V_INTR pending
>> + * event available while hlt instruction is executed. The HLT VM Exit doesn't
>> + * occur in above-mentioned scenario if the Idle HLT intercept feature is enabled.
>> + */
>> +
>> +static void halter_guest_code(void)
> 
> Just "guest_code()".  Yeah, it's a weird generic name, but at this point it's so
> ubiquitous that it's analogous to main(), i.e. readers know that guest_code() is
> the entry point.  And deviating from that suggests that there is a second vCPU
> running _other_ guest code (otherwise, why differentiate?), which isn't the case.
> 

Sure.

>> +{
>> +	uint32_t icr_val;
>> +	int i;
>> +
>> +	verify_apic_base_addr();
> 
> Why?  The test will fail if the APIC is borked, this is just unnecessary noise
> that distracts readers.
> 

Sure. I will remove it in V2.

>> +	xapic_enable();
>> +
>> +	icr_val = (APIC_DEST_SELF | APIC_INT_ASSERT | VINTR_VECTOR);
>> +
>> +	for (i = 0; i < NUM_ITERATIONS; i++) {
>> +		xapic_write_reg(APIC_ICR, icr_val);
>> +		asm volatile("sti; hlt; cli");
> 
> Please add safe_halt() and cli() helpers in processor.h.  And then do:
> 
> 		cli();
> 		xapic_write_reg(APIC_ICR, icr_val);
> 		safe_halt();
> 
> to guarantee that interrupts are disabled when the IPI is sent.  And as above,
> 
> 		GUEST_ASSERT(READ_ONCE(irq_received));
> 		WRITE_ONCE(irq_received, false);
> 

Sure.
>> +	}
>> +	GUEST_DONE();
>> +}
>> +
>> +static void guest_vintr_handler(struct ex_regs *regs)
>> +{
>> +	vintr_rcvd++;
>> +	xapic_write_reg(APIC_EOI, 0x30);
> 
> EOI is typically written with '0', not the vector, because the legacy EOI register
> clears the highest ISR vector, not whatever is specified.  IIRC, one of the Intel
> or AMD specs even says to use '0'.
> 
> AMD's Specific EOI register does allow targeting a specific vector, but that's
> not what's being used here.

Sure.
> 
>> +}
>> +
>> +int main(int argc, char *argv[])
>> +{
>> +	struct kvm_vm *vm;
>> +	struct kvm_vcpu *vcpu;
>> +	struct ucall uc;
>> +	uint64_t  halt_exits, vintr_exits;
>> +	uint64_t *pvintr_rcvd;
>> +
>> +	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
> 
> No, this test doesn't require SVM, which is KVM advertising *nested* SVM.  This
> test does require idle-hlt support though...

Sure. I will add it in V2.

> 
>> +	/* Check the extension for binary stats */
>> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
>> +
>> +	vm = vm_create_with_one_vcpu(&vcpu, halter_guest_code);
>> +
>> +	vm_init_descriptor_tables(vm);
>> +	vcpu_init_descriptor_tables(vcpu);
>> +	vm_install_exception_handler(vm, VINTR_VECTOR, guest_vintr_handler);
>> +	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
>> +
>> +	vcpu_run(vcpu);
>> +	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
>> +
>> +	halt_exits = vcpu_get_stat(vcpu, "halt_exits");
> 
> Is there really no way to get binary stats without having to pass in strings?

I saw that one of the existing test cases passes strings to get the binary stats of
the VM. That is why I used strings to get the binary stats of the vCPU. I will try to
find another way to get the binary stats.

> 
>> +	vintr_exits = vcpu_get_stat(vcpu, "irq_window_exits");
>> +	pvintr_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&vintr_rcvd);
>> +
>> +	switch (get_ucall(vcpu, &uc)) {
>> +	case UCALL_ABORT:
>> +		REPORT_GUEST_ASSERT(uc);
>> +		/* NOT REACHED */
> 
> Eh, just put a "break;" here and drop the comment.
> 
Sure.

>> +	case UCALL_DONE:
>> +		goto done;
> 
> break;
> 
>> +	default:
>> +		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
>> +	}
>> +
>> +done:
>> +	TEST_ASSERT(halt_exits == 0,
> 
> So in all honesty, this isn't a very interesting test.  It's more of a CPU test
> than a KVM test.  I do think it's worth adding, because why not.
> 
> But I'd also like to see a testcase for KVM_X86_DISABLE_EXITS_HLT.  It would be
> a generic test, i.e. not specific to idle-hlt since there is no dependency and
> the test shouldn't care.  I _think_ it would be fairly straightforward: create
> a VM without an in-kernel APIC (so that HLT exits to userspace), disable HLT
> interception, send a signal from a different task after some delay, and execute
> HLT in the guest.  Then verify the vCPU exited because of -EINTR and not HLT.

I will create this test.
> 
>> +		    "Test Failed:\n"
>> +		    "Guest executed VINTR followed by halts: %d times\n"
>> +		    "The guest exited due to halt: %ld times and number\n"
>> +		    "of vintr exits: %ld and vintr got re-injected: %ld times\n",
>> +		    NUM_ITERATIONS, halt_exits, vintr_exits, *pvintr_rcvd);
> 
> I appreciate the effort to provide more info, but this is way too noisy.  If
> anything, print gory details in a pr_debug() *before* the assert (see below),
> and then simply do:
> 
> 	TEST_ASSERT_EQ(halt_exits, 0);
> 
Sure.

>> +	fprintf(stderr,
>> +		"Test Successful:\n"
>> +		"Guest executed VINTR followed by halts: %d times\n"
>> +		"The guest exited due to halt: %ld times and number\n"
>> +		"of vintr exits: %ld and vintr got re-injected: %ld times\n",
>> +		NUM_ITERATIONS, halt_exits, vintr_exits, *pvintr_rcvd);
> 
> And this should be pr_debug(), because no human is going to look at this except
> when the test isn't working correctly.

I will change it to pr_debug() in V2.
> 
>> +
>> +	kvm_vm_free(vm);
>> +	return 0;
>> +}
>> -- 
>> 2.34.1
>
Sean Christopherson March 14, 2024, 3:06 p.m. UTC | #4
On Thu, Mar 14, 2024, Manali Shukla wrote:
> >> +#define VINTR_VECTOR     0x30
> >> +#define NUM_ITERATIONS 100000
> > 
> > What's the runtime?  If it's less than a second, then whatever, but if it's at
> > all longer than that, then I'd prefer to use a lower default and make this user-
> > configurable.
> 
> It takes ~34s to run this test. 

LOL, yeah, no.  That's 33+ seconds of wasted time.  From a *KVM* perspective, this
is quite binary: either KVM intercepts HLT or it doesn't.  Any timing bugs would
be purely CPU bugs.

Please adjust this to have the default runtime <1 second.  If you feel strongly
about the need for a long-running test, feel free to add a command line option
to control the number of iterations.
Manali Shukla March 22, 2024, 4:03 p.m. UTC | #5
On 3/14/2024 8:36 PM, Sean Christopherson wrote:
> On Thu, Mar 14, 2024, Manali Shukla wrote:
>>>> +#define VINTR_VECTOR     0x30
>>>> +#define NUM_ITERATIONS 100000
>>>
>>> What's the runtime?  If it's less than a second, then whatever, but if it's at
>>> all longer than that, then I'd prefer to use a lower default and make this user-
>>> configurable.
>>
>> It takes ~34s to run this test. 
> 
> LOL, yeah, no.  That's 33+ seconds of wasted time.  From a *KVM* perspective, this
> is quite binary: either KVM intercepts HLT or it doesn't.  Any timing bugs would
> be purely CPU bugs.
> 
> Please adjust this to have the default runtime <1 second.  If you feel strongly
> about the need for a long-running test, feel free to add a command line option
> to control the number of iterations.

Ack.
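
For reference, one simple way to wire that up is to keep a small default and accept
an optional iteration count on the command line (a hypothetical sketch, not from any
posted version of the test):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical default, chosen to keep the runtime well under a second. */
#define NUM_ITERATIONS_DEFAULT	1000

int main(int argc, char *argv[])
{
	unsigned long iterations = NUM_ITERATIONS_DEFAULT;

	if (argc > 1) {
		char *end;

		iterations = strtoul(argv[1], &end, 0);
		if (*end || !iterations) {
			fprintf(stderr, "Usage: %s [nr_iterations]\n", argv[0]);
			return 1;
		}
	}

	printf("Running %lu iterations\n", iterations);
	/* The count would then be handed to the guest, e.g. via vcpu_args_set(). */
	return 0;
}

In the selftest itself, the existing atoi_positive() helper from test_util.h would
likely be the more idiomatic choice for the parsing.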

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 492e937fab00..7ad03dc4f35e 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -89,6 +89,7 @@  TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_idlehlt_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
diff --git a/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c b/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
new file mode 100644
index 000000000000..1564511799d4
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_idlehlt_test.c
@@ -0,0 +1,118 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *  svm_idlehalt_test
+ *
+ *  Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ *  For licencing details see kernel-base/COPYING
+ *
+ *  Author:
+ *  Manali Shukla  <manali.shukla@amd.com>
+ */
+#include "kvm_util.h"
+#include "svm_util.h"
+#include "processor.h"
+#include "test_util.h"
+#include "apic.h"
+
+#define VINTR_VECTOR     0x30
+#define NUM_ITERATIONS 100000
+
+/*
+ * Incremented in the VINTR handler. Provides evidence to the sender that the
+ * VINTR has arrived at the destination.
+ */
+static volatile uint64_t vintr_rcvd;
+
+void verify_apic_base_addr(void)
+{
+	uint64_t msr = rdmsr(MSR_IA32_APICBASE);
+	uint64_t base = GET_APIC_BASE(msr);
+
+	GUEST_ASSERT(base == APIC_DEFAULT_GPA);
+}
+
+/*
+ * The halting guest code instruments the scenario where there is a V_INTR pending
+ * event available while hlt instruction is executed. The HLT VM Exit doesn't
+ * occur in above-mentioned scenario if the Idle HLT intercept feature is enabled.
+ */
+
+static void halter_guest_code(void)
+{
+	uint32_t icr_val;
+	int i;
+
+	verify_apic_base_addr();
+	xapic_enable();
+
+	icr_val = (APIC_DEST_SELF | APIC_INT_ASSERT | VINTR_VECTOR);
+
+	for (i = 0; i < NUM_ITERATIONS; i++) {
+		xapic_write_reg(APIC_ICR, icr_val);
+		asm volatile("sti; hlt; cli");
+	}
+	GUEST_DONE();
+}
+
+static void guest_vintr_handler(struct ex_regs *regs)
+{
+	vintr_rcvd++;
+	xapic_write_reg(APIC_EOI, 0x30);
+}
+
+int main(int argc, char *argv[])
+{
+	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
+	struct ucall uc;
+	uint64_t  halt_exits, vintr_exits;
+	uint64_t *pvintr_rcvd;
+
+	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+	/* Check the extension for binary stats */
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
+
+	vm = vm_create_with_one_vcpu(&vcpu, halter_guest_code);
+
+	vm_init_descriptor_tables(vm);
+	vcpu_init_descriptor_tables(vcpu);
+	vm_install_exception_handler(vm, VINTR_VECTOR, guest_vintr_handler);
+	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
+
+	vcpu_run(vcpu);
+	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+
+	halt_exits = vcpu_get_stat(vcpu, "halt_exits");
+	vintr_exits = vcpu_get_stat(vcpu, "irq_window_exits");
+	pvintr_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&vintr_rcvd);
+
+	switch (get_ucall(vcpu, &uc)) {
+	case UCALL_ABORT:
+		REPORT_GUEST_ASSERT(uc);
+		/* NOT REACHED */
+	case UCALL_DONE:
+		goto done;
+	default:
+		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+	}
+
+done:
+	TEST_ASSERT(halt_exits == 0,
+		    "Test Failed:\n"
+		    "Guest executed VINTR followed by halts: %d times\n"
+		    "The guest exited due to halt: %ld times and number\n"
+		    "of vintr exits: %ld and vintr got re-injected: %ld times\n",
+		    NUM_ITERATIONS, halt_exits, vintr_exits, *pvintr_rcvd);
+
+	fprintf(stderr,
+		"Test Successful:\n"
+		"Guest executed VINTR followed by halts: %d times\n"
+		"The guest exited due to halt: %ld times and number\n"
+		"of vintr exits: %ld and vintr got re-injected: %ld times\n",
+		NUM_ITERATIONS, halt_exits, vintr_exits, *pvintr_rcvd);
+
+	kvm_vm_free(vm);
+	return 0;
+}