
[v1,5/5] x86: Hyper-V SynIC timers test

Message ID 1448555397-29150-6-git-send-email-asmetanin@virtuozzo.com (mailing list archive)
State New, archived

Commit Message

Andrey Smetanin Nov. 26, 2015, 4:29 p.m. UTC
The test checks Hyper-V SynIC timer functionality.
It runs on every vCPU, starting and stopping
periodic/one-shot timers (with a 1 ms period) and
checking the validity of the expiration messages
received in the appropriate ISRs.

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Marcelo Tosatti <mtosatti@redhat.com>
CC: Roman Kagan <rkagan@virtuozzo.com>
CC: Denis V. Lunev <den@openvz.org>
CC: qemu-devel@nongnu.org

---
 config/config-x86-common.mak |   4 +-
 x86/hyperv_stimer.c          | 500 +++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg            |   5 +
 3 files changed, 508 insertions(+), 1 deletion(-)
 create mode 100644 x86/hyperv_stimer.c
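
Once built, the new test can be launched with the standard kvm-unit-tests
runner, for example (assuming a QEMU build that provides the hyperv-testdev
device and a KVM host with Hyper-V SynIC timer support):

    ./x86/run x86/hyperv_stimer.flat -smp 2 \
        -cpu host,hv_time,hv_synic,hv_stimer -device hyperv-testdev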

Comments

Paolo Bonzini Nov. 27, 2015, 11:17 a.m. UTC | #1
The test logic is good, but the glue can be improved a bit so that the
output is more useful if it breaks.

On 26/11/2015 17:29, Andrey Smetanin wrote:
> The test checks Hyper-V SynIC timers functionality.
> The test runs on every vCPU and performs start/stop
> of periodic/one-shot timers (with period=1ms) and checks
> validity of received expiration messages in appropriate
> ISR's.
> 
> Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
> Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
> CC: Paolo Bonzini <pbonzini@redhat.com>
> CC: Marcelo Tosatti <mtosatti@redhat.com>
> CC: Roman Kagan <rkagan@virtuozzo.com>
> CC: Denis V. Lunev <den@openvz.org>
> CC: qemu-devel@nongnu.org
> 
> ---
>  config/config-x86-common.mak |   4 +-
>  x86/hyperv_stimer.c          | 500 +++++++++++++++++++++++++++++++++++++++++++
>  x86/unittests.cfg            |   5 +
>  3 files changed, 508 insertions(+), 1 deletion(-)
>  create mode 100644 x86/hyperv_stimer.c
> 
> diff --git a/config/config-x86-common.mak b/config/config-x86-common.mak
> index f64874d..a75be87 100644
> --- a/config/config-x86-common.mak
> +++ b/config/config-x86-common.mak
> @@ -37,7 +37,7 @@ tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>                 $(TEST_DIR)/s3.flat $(TEST_DIR)/pmu.flat \
>                 $(TEST_DIR)/tsc_adjust.flat $(TEST_DIR)/asyncpf.flat \
>                 $(TEST_DIR)/init.flat $(TEST_DIR)/smap.flat \
> -               $(TEST_DIR)/hyperv_synic.flat
> +               $(TEST_DIR)/hyperv_synic.flat $(TEST_DIR)/hyperv_stimer.flat \
>  
>  ifdef API
>  tests-common += api/api-sample
> @@ -115,6 +115,8 @@ $(TEST_DIR)/memory.elf: $(cstart.o) $(TEST_DIR)/memory.o
>  
>  $(TEST_DIR)/hyperv_synic.elf: $(cstart.o) $(TEST_DIR)/hyperv_synic.o
>  
> +$(TEST_DIR)/hyperv_stimer.elf: $(cstart.o) $(TEST_DIR)/hyperv_stimer.o
> +
>  arch_clean:
>  	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat $(TEST_DIR)/*.elf \
>  	$(TEST_DIR)/.*.d lib/x86/.*.d
> diff --git a/x86/hyperv_stimer.c b/x86/hyperv_stimer.c
> new file mode 100644
> index 0000000..e9186ca
> --- /dev/null
> +++ b/x86/hyperv_stimer.c
> @@ -0,0 +1,500 @@
> +#include "libcflat.h"
> +#include "processor.h"
> +#include "msr.h"
> +#include "isr.h"
> +#include "vm.h"
> +#include "apic.h"
> +#include "desc.h"
> +#include "io.h"
> +#include "smp.h"
> +#include "atomic.h"
> +
> +#define MAX_CPUS 4
> +#define HYPERV_CPUID_FEATURES                   0x40000003
> +
> +#define HV_SYNIC_CONTROL_ENABLE                 (1ULL << 0)
> +#define HV_SYNIC_SIMP_ENABLE                    (1ULL << 0)
> +#define HV_SYNIC_SIEFP_ENABLE                   (1ULL << 0)
> +#define HV_SYNIC_SINT_MASKED                    (1ULL << 16)
> +#define HV_SYNIC_SINT_AUTO_EOI                  (1ULL << 17)
> +#define HV_SYNIC_SINT_VECTOR_MASK               (0xFF)
> +#define HV_SYNIC_SINT_COUNT                     16
> +
> +#define HV_STIMER_ENABLE                (1ULL << 0)
> +#define HV_STIMER_PERIODIC              (1ULL << 1)
> +#define HV_STIMER_LAZY                  (1ULL << 2)
> +#define HV_STIMER_AUTOENABLE            (1ULL << 3)
> +#define HV_STIMER_SINT(config)          (__u8)(((config) >> 16) & 0x0F)
> +
> +#define HV_SYNIC_STIMER_COUNT           (4)
> +
> +/* Define synthetic interrupt controller message constants. */
> +#define HV_MESSAGE_SIZE                 (256)
> +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT   (240)
> +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT  (30)
> +
> +/* Define hypervisor message types. */
> +enum hv_message_type {
> +        HVMSG_NONE                      = 0x00000000,
> +
> +        /* Memory access messages. */
> +        HVMSG_UNMAPPED_GPA              = 0x80000000,
> +        HVMSG_GPA_INTERCEPT             = 0x80000001,
> +
> +        /* Timer notification messages. */
> +        HVMSG_TIMER_EXPIRED                     = 0x80000010,
> +
> +        /* Error messages. */
> +        HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
> +        HVMSG_UNRECOVERABLE_EXCEPTION   = 0x80000021,
> +        HVMSG_UNSUPPORTED_FEATURE               = 0x80000022,
> +
> +        /* Trace buffer complete messages. */
> +        HVMSG_EVENTLOG_BUFFERCOMPLETE   = 0x80000040,
> +
> +        /* Platform-specific processor intercept messages. */
> +        HVMSG_X64_IOPORT_INTERCEPT              = 0x80010000,
> +        HVMSG_X64_MSR_INTERCEPT         = 0x80010001,
> +        HVMSG_X64_CPUID_INTERCEPT               = 0x80010002,
> +        HVMSG_X64_EXCEPTION_INTERCEPT   = 0x80010003,
> +        HVMSG_X64_APIC_EOI                      = 0x80010004,
> +        HVMSG_X64_LEGACY_FP_ERROR               = 0x80010005
> +};
> +
> +/* Define synthetic interrupt controller message flags. */
> +union hv_message_flags {
> +        uint8_t asu8;
> +        struct {
> +                uint8_t msg_pending:1;
> +                uint8_t reserved:7;
> +        };
> +};
> +
> +union hv_port_id {
> +        uint32_t asu32;
> +        struct {
> +                uint32_t id:24;
> +                uint32_t reserved:8;
> +        } u;
> +};
> +
> +/* Define port type. */
> +enum hv_port_type {
> +        HVPORT_MSG      = 1,
> +        HVPORT_EVENT            = 2,
> +        HVPORT_MONITOR  = 3
> +};
> +
> +/* Define synthetic interrupt controller message header. */
> +struct hv_message_header {
> +        enum hv_message_type message_type;
> +        uint8_t payload_size;
> +        union hv_message_flags message_flags;
> +        uint8_t reserved[2];
> +        union {
> +                uint64_t sender;
> +                union hv_port_id port;
> +        };
> +};
> +
> +/* Define timer message payload structure. */
> +struct hv_timer_message_payload {
> +        uint32_t timer_index;
> +        uint32_t reserved;
> +        uint64_t expiration_time;       /* When the timer expired */
> +        uint64_t delivery_time; /* When the message was delivered */
> +};
> +
> +/* Define synthetic interrupt controller message format. */
> +struct hv_message {
> +        struct hv_message_header header;
> +        union {
> +                uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
> +        } u;
> +};
> +
> +/* Define the synthetic interrupt message page layout. */
> +struct hv_message_page {
> +        struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
> +};
> +
> +enum {
> +    HV_TEST_DEV_SINT_ROUTE_CREATE = 1,
> +    HV_TEST_DEV_SINT_ROUTE_DESTROY,
> +    HV_TEST_DEV_SINT_ROUTE_SET_SINT
> +};
> +
> +static atomic_t g_cpus_comp_count;
> +static int g_cpus_count;
> +static struct spinlock g_synic_alloc_lock;
> +
> +static bool synic_supported(void)
> +{
> +   return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_SYNIC_AVAILABLE;
> +}
> +
> +static bool stimer_supported(void)
> +{
> +    return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_SYNIC_AVAILABLE;
> +}
> +
> +static bool hv_time_ref_counter_supported(void)
> +{
> +    return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
> +}
> +
> +static void synic_ctl(u8 ctl, u8 vcpu_id, u8 sint)
> +{
> +    outl((ctl << 16)|((vcpu_id) << 8)|sint, 0x3000);
> +}
> +
> +static void synic_sint_create(int sint, int vec, bool auto_eoi)
> +{
> +    int vcpu = smp_id();
> +
> +    wrmsr(HV_X64_MSR_SINT0 + sint,
> +          (u64)vec | ((auto_eoi) ? HV_SYNIC_SINT_AUTO_EOI : 0));
> +    synic_ctl(HV_TEST_DEV_SINT_ROUTE_CREATE, vcpu, sint);
> +}
> +
> +static void synic_sint_destroy(int sint)
> +{
> +    int vcpu = smp_id();
> +
> +    wrmsr(HV_X64_MSR_SINT0 + sint, 0xFF|HV_SYNIC_SINT_MASKED);
> +    synic_ctl(HV_TEST_DEV_SINT_ROUTE_DESTROY, vcpu, sint);
> +}
> +
> +struct stimer {
> +    int sint;
> +    int index;
> +    atomic_t fire_count;
> +};
> +
> +struct svcpu {
> +    int vcpu;
> +    void *msg_page;
> +    void *evt_page;
> +    struct stimer timer[HV_SYNIC_STIMER_COUNT];
> +};
> +
> +static struct svcpu g_synic_vcpu[MAX_CPUS];
> +
> +static void *synic_alloc_page(void)
> +{
> +    void *page;
> +
> +    spin_lock(&g_synic_alloc_lock);
> +    page = alloc_page();
> +    spin_unlock(&g_synic_alloc_lock);
> +    return page;
> +}
> +
> +static void synic_free_page(void *page)
> +{
> +    spin_lock(&g_synic_alloc_lock);
> +    free_page(page);
> +    spin_unlock(&g_synic_alloc_lock);
> +}
> +
> +static void stimer_init(struct stimer *timer, int index)
> +{
> +    memset(timer, 0, sizeof(*timer));
> +    timer->index = index;
> +}
> +
> +static void synic_enable(void)
> +{
> +    int vcpu = smp_id(), i;
> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
> +
> +    memset(svcpu, 0, sizeof(*svcpu));
> +    svcpu->vcpu = vcpu;
> +    svcpu->msg_page = synic_alloc_page();
> +    for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
> +        stimer_init(&svcpu->timer[i], i);
> +    }
> +    wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(svcpu->msg_page) |
> +            HV_SYNIC_SIMP_ENABLE);
> +    wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);
> +}
> +
> +static void stimer_shutdown(struct stimer *timer)
> +{
> +    wrmsr(HV_X64_MSR_STIMER0_CONFIG + 2*timer->index, 0);
> +}
> +
> +static void process_stimer_expired(struct svcpu *svcpu, struct stimer *timer,
> +                                   u64 expiration_time, u64 delivery_time)
> +{
> +    atomic_inc(&timer->fire_count);
> +}
> +
> +static void process_stimer_msg(struct svcpu *svcpu,
> +                              struct hv_message *msg, int sint)
> +{
> +    struct hv_timer_message_payload *payload =
> +                        (struct hv_timer_message_payload *)msg->u.payload;
> +    struct stimer *timer;
> +
> +    if (msg->header.message_type != HVMSG_TIMER_EXPIRED &&
> +        msg->header.message_type != HVMSG_NONE) {
> +        report("invalid Hyper-V SynIC msg type", false);
> +        report_summary();
> +        exit(-1);
> +        return;
> +    }
> +
> +    if (msg->header.message_type == HVMSG_NONE) {
> +        return;
> +    }
> +
> +    if (msg->header.payload_size < sizeof(*payload)) {
> +        report("invalid Hyper-V SynIC msg payload size", false);
> +        report_summary();
> +        exit(-1);
> +        return;
> +    }
> +
> +    /* Now process timer expiration message */
> +
> +    if (payload->timer_index >= ARRAY_SIZE(svcpu->timer)) {
> +        report("invalid Hyper-V SynIC timer index", false);
> +        report_summary();
> +        exit(-1);
> +        return;
> +    }
> +    timer = &svcpu->timer[payload->timer_index];
> +    process_stimer_expired(svcpu, timer, payload->expiration_time,
> +                          payload->delivery_time);
> +
> +    msg->header.message_type = HVMSG_NONE;
> +    mb();
> +    if (msg->header.message_flags.msg_pending) {
> +        wrmsr(HV_X64_MSR_EOM, 0);
> +    }
> +}
> +
> +static void __stimer_isr(int vcpu)
> +{
> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
> +    struct hv_message_page *msg_page;
> +    struct hv_message *msg;
> +    int i;
> +
> +
> +    msg_page = (struct hv_message_page *)svcpu->msg_page;
> +    for (i = 0; i < ARRAY_SIZE(msg_page->sint_message); i++) {
> +        msg = &msg_page->sint_message[i];
> +        process_stimer_msg(svcpu, msg, i);
> +    }
> +}
> +
> +static void stimer_isr(isr_regs_t *regs)
> +{
> +    int vcpu = smp_id();
> +
> +    __stimer_isr(vcpu);
> +    eoi();
> +}
> +
> +static void stimer_isr_auto_eoi(isr_regs_t *regs)
> +{
> +    int vcpu = smp_id();
> +
> +    __stimer_isr(vcpu);
> +}
> +
> +static void stimer_start(struct stimer *timer,
> +                         bool auto_enable, bool periodic,
> +                         u64 tick_100ns, int sint)
> +{
> +    u64 config, count;
> +
> +    timer->sint = sint;
> +    atomic_set(&timer->fire_count, 0);
> +
> +    config = 0;
> +    if (periodic) {
> +        config |= HV_STIMER_PERIODIC;
> +    }
> +
> +    config |= ((u8)(sint & 0xFF)) << 16;
> +    config |= HV_STIMER_ENABLE;
> +    if (auto_enable) {
> +        config |= HV_STIMER_AUTOENABLE;
> +    }
> +
> +    if (periodic) {
> +        count = tick_100ns;
> +    } else {
> +        count = rdmsr(HV_X64_MSR_TIME_REF_COUNT) + tick_100ns;
> +    }
> +
> +    if (!auto_enable) {
> +        wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
> +        wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
> +    } else {
> +        wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
> +        wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
> +    }
> +}
> +
> +static void stimers_shutdown(void)
> +{
> +    int vcpu = smp_id(), i;
> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
> +
> +    for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
> +        stimer_shutdown(&svcpu->timer[i]);
> +    }
> +}
> +
> +static void synic_disable(void)
> +{
> +    int vcpu = smp_id();
> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
> +
> +    wrmsr(HV_X64_MSR_SCONTROL, 0);
> +    wrmsr(HV_X64_MSR_SIMP, 0);
> +    wrmsr(HV_X64_MSR_SIEFP, 0);
> +    synic_free_page(svcpu->msg_page);
> +}
> +
> +static void cpu_comp(void)
> +{
> +    atomic_inc(&g_cpus_comp_count);
> +}
> +
> +static void stimer_test_prepare(void *ctx)
> +{
> +    write_cr3((ulong)ctx);
> +    irq_enable();
> +    synic_enable();
> +    cpu_comp();
> +}
> +
> +static void stimer_test(void *ctx)
> +{
> +    int vcpu = smp_id();
> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
> +    struct stimer *timer1, *timer2;
> +    int sint1, sint2;
> +    u64 one_ms_in_100ns = 10000;
> +    irq_enable();

Unnecessary.

> +    sint1 = 2;
> +    sint2 = 3;

Please make these #defines.
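
For instance (names here are only illustrative, not part of the patch):

    #define SINT1_NUM 2
    #define SINT2_NUM 3
    #define SINT1_VEC 0xF1
    #define SINT2_VEC 0xF2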

> +    synic_sint_create(sint1, 0xF1, false);
> +    synic_sint_create(sint2, 0xF2, true);

Please move this to stimer_test_prepare...

> +    timer1 = &svcpu->timer[0];
> +    timer2 = &svcpu->timer[1];
> +
> +    /* Check periodic timers */
> +    stimer_start(timer1, false, true, one_ms_in_100ns, sint1);
> +    stimer_start(timer2, false, true, one_ms_in_100ns, sint2);
> +    while ((atomic_read(&timer1->fire_count) < 1000) ||
> +           (atomic_read(&timer2->fire_count) < 1000)) {
> +        pause();
> +    }
> +    stimer_shutdown(timer1);
> +    stimer_shutdown(timer2);

... so that the subtests (between stimer_start and stimer_shutdown) can
each be a separate callback and a separate report() call.
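
Roughly along these lines (a sketch only; ONE_MS_IN_100NS, SINT1_NUM and
SINT2_NUM are the #defines suggested above, and the function name is made
up):

    static void stimer_test_periodic(void *ctx)
    {
        struct svcpu *svcpu = &g_synic_vcpu[smp_id()];
        struct stimer *timer1 = &svcpu->timer[0];
        struct stimer *timer2 = &svcpu->timer[1];

        /* Both timers tick every 1 ms; wait for 1000 expirations each. */
        stimer_start(timer1, false, true, ONE_MS_IN_100NS, SINT1_NUM);
        stimer_start(timer2, false, true, ONE_MS_IN_100NS, SINT2_NUM);
        while (atomic_read(&timer1->fire_count) < 1000 ||
               atomic_read(&timer2->fire_count) < 1000) {
            pause();
        }
        stimer_shutdown(timer1);
        stimer_shutdown(timer2);
        cpu_comp();
    }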

> +    /* Check one-shot timer */
> +    stimer_start(timer1, false, false, one_ms_in_100ns, sint1);
> +    while (atomic_read(&timer1->fire_count) < 1) {
> +        pause();
> +    }
> +    stimer_shutdown(timer1);
> +
> +    /* Check auto-enable one-shot timer */
> +    stimer_start(timer1, true, false, one_ms_in_100ns, sint1);
> +    while (atomic_read(&timer1->fire_count) < 1) {
> +        pause();
> +    }
> +    stimer_shutdown(timer1);
> +
> +    /* Check auto-enable periodic timer */
> +    stimer_start(timer1, true, true, one_ms_in_100ns, sint1);
> +    while (atomic_read(&timer1->fire_count) < 1000) {
> +        pause();
> +    }
> +    stimer_shutdown(timer1);
> +
> +
> +    synic_sint_destroy(sint1);
> +    synic_sint_destroy(sint2);

These go in stimer_test_cleanup.
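
I.e. something like (again just a sketch, with the SINT1_NUM/SINT2_NUM
#defines from above):

    static void stimer_test_cleanup(void *ctx)
    {
        stimers_shutdown();
        synic_sint_destroy(SINT1_NUM);
        synic_sint_destroy(SINT2_NUM);
        synic_disable();
        cpu_comp();
    }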

> +    cpu_comp();
> +}
> +
> +static void stimer_test_cleanup(void *ctx)
> +{
> +    irq_enable();

Why enable again?

> +    stimers_shutdown();
> +    synic_disable();
> +    cpu_comp();
> +}
> +
> +static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
> +{
> +    int i;
> +
> +    atomic_set(&g_cpus_comp_count, 0);
> +    for (i = 0; i < g_cpus_count; i++) {
> +        on_cpu_async(i, func, ctx);
> +    }
> +    while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
> +        pause();
> +    }
> +}
> +
> +static bool stimer_test_all(void)
> +{
> +    int ncpus;
> +
> +    setup_vm();
> +    smp_init();
> +    setup_idt();
> +    enable_apic();
> +
> +    handle_irq(0xF1, stimer_isr);
> +    handle_irq(0xF2, stimer_isr_auto_eoi);
> +
> +    ncpus = cpu_count();
> +    if (ncpus > MAX_CPUS) {
> +        ncpus = MAX_CPUS;
> +    }
> +    printf("cpus = %d\n", ncpus);
> +    g_cpus_count = ncpus;
> +
> +    on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
> +    on_each_cpu_async_wait(stimer_test, NULL);
> +    on_each_cpu_async_wait(stimer_test_cleanup, NULL);
> +
> +    return true;
> +}
> +
> +int main(int ac, char **av)
> +{
> +
> +    if (!synic_supported()) {
> +        report("Hyper-V SynIC is not supported", true);
> +        goto done;
> +    }
> +
> +    if (!stimer_supported()) {
> +        report("Hyper-V SynIC timers are not supported", true);
> +        goto done;
> +    }
> +
> +    if (!hv_time_ref_counter_supported()) {
> +        report("Hyper-V time reference counter is not supported", true);
> +        goto done;
> +    }
> +    report("Hyper-V SynIC timers test", stimer_test_all());

You can move report() into stimer_test_all(), since you'll have many of them.
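
E.g. (a sketch; the per-subtest callback names are made up):

    on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());

    /* Reaching this point means every vCPU saw enough expirations. */
    on_each_cpu_async_wait(stimer_test_periodic, NULL);
    report("periodic timers", true);

    on_each_cpu_async_wait(stimer_test_one_shot, NULL);
    report("one-shot timer", true);

    on_each_cpu_async_wait(stimer_test_cleanup, NULL);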

Thanks,

Paolo

> +done:
> +    return report_summary();
> +}
> diff --git a/x86/unittests.cfg b/x86/unittests.cfg
> index ffffc15..dc93451 100644
> --- a/x86/unittests.cfg
> +++ b/x86/unittests.cfg
> @@ -183,3 +183,8 @@ arch = x86_64
>  file = hyperv_synic.flat
>  smp = 2
>  extra_params = -cpu kvm64,hv_synic -device hyperv-testdev
> +
> +[hyperv_stimer]
> +file = hyperv_stimer.flat
> +smp = 2
> +extra_params = -cpu host,hv_time,hv_synic,hv_stimer -device hyperv-testdev
> 
Andrey Smetanin Nov. 27, 2015, 11:30 a.m. UTC | #2
On 11/27/2015 02:17 PM, Paolo Bonzini wrote:
> The test logic is good, but the glue can be improved a bit so that the
> output is more useful if it breaks.
Thanks for the comments below. I'll redo this patch.
>
> On 26/11/2015 17:29, Andrey Smetanin wrote:
>> The test checks Hyper-V SynIC timers functionality.
>> The test runs on every vCPU and performs start/stop
>> of periodic/one-shot timers (with period=1ms) and checks
>> validity of received expiration messages in appropriate
>> ISR's.
>>
>> Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
>> Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
>> CC: Paolo Bonzini <pbonzini@redhat.com>
>> CC: Marcelo Tosatti <mtosatti@redhat.com>
>> CC: Roman Kagan <rkagan@virtuozzo.com>
>> CC: Denis V. Lunev <den@openvz.org>
>> CC: qemu-devel@nongnu.org
>>
>> ---
>>   config/config-x86-common.mak |   4 +-
>>   x86/hyperv_stimer.c          | 500 +++++++++++++++++++++++++++++++++++++++++++
>>   x86/unittests.cfg            |   5 +
>>   3 files changed, 508 insertions(+), 1 deletion(-)
>>   create mode 100644 x86/hyperv_stimer.c
>>
>> diff --git a/config/config-x86-common.mak b/config/config-x86-common.mak
>> index f64874d..a75be87 100644
>> --- a/config/config-x86-common.mak
>> +++ b/config/config-x86-common.mak
>> @@ -37,7 +37,7 @@ tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
>>                  $(TEST_DIR)/s3.flat $(TEST_DIR)/pmu.flat \
>>                  $(TEST_DIR)/tsc_adjust.flat $(TEST_DIR)/asyncpf.flat \
>>                  $(TEST_DIR)/init.flat $(TEST_DIR)/smap.flat \
>> -               $(TEST_DIR)/hyperv_synic.flat
>> +               $(TEST_DIR)/hyperv_synic.flat $(TEST_DIR)/hyperv_stimer.flat \
>>
>>   ifdef API
>>   tests-common += api/api-sample
>> @@ -115,6 +115,8 @@ $(TEST_DIR)/memory.elf: $(cstart.o) $(TEST_DIR)/memory.o
>>
>>   $(TEST_DIR)/hyperv_synic.elf: $(cstart.o) $(TEST_DIR)/hyperv_synic.o
>>
>> +$(TEST_DIR)/hyperv_stimer.elf: $(cstart.o) $(TEST_DIR)/hyperv_stimer.o
>> +
>>   arch_clean:
>>   	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat $(TEST_DIR)/*.elf \
>>   	$(TEST_DIR)/.*.d lib/x86/.*.d
>> diff --git a/x86/hyperv_stimer.c b/x86/hyperv_stimer.c
>> new file mode 100644
>> index 0000000..e9186ca
>> --- /dev/null
>> +++ b/x86/hyperv_stimer.c
>> @@ -0,0 +1,500 @@
>> +#include "libcflat.h"
>> +#include "processor.h"
>> +#include "msr.h"
>> +#include "isr.h"
>> +#include "vm.h"
>> +#include "apic.h"
>> +#include "desc.h"
>> +#include "io.h"
>> +#include "smp.h"
>> +#include "atomic.h"
>> +
>> +#define MAX_CPUS 4
>> +#define HYPERV_CPUID_FEATURES                   0x40000003
>> +
>> +#define HV_SYNIC_CONTROL_ENABLE                 (1ULL << 0)
>> +#define HV_SYNIC_SIMP_ENABLE                    (1ULL << 0)
>> +#define HV_SYNIC_SIEFP_ENABLE                   (1ULL << 0)
>> +#define HV_SYNIC_SINT_MASKED                    (1ULL << 16)
>> +#define HV_SYNIC_SINT_AUTO_EOI                  (1ULL << 17)
>> +#define HV_SYNIC_SINT_VECTOR_MASK               (0xFF)
>> +#define HV_SYNIC_SINT_COUNT                     16
>> +
>> +#define HV_STIMER_ENABLE                (1ULL << 0)
>> +#define HV_STIMER_PERIODIC              (1ULL << 1)
>> +#define HV_STIMER_LAZY                  (1ULL << 2)
>> +#define HV_STIMER_AUTOENABLE            (1ULL << 3)
>> +#define HV_STIMER_SINT(config)          (__u8)(((config) >> 16) & 0x0F)
>> +
>> +#define HV_SYNIC_STIMER_COUNT           (4)
>> +
>> +/* Define synthetic interrupt controller message constants. */
>> +#define HV_MESSAGE_SIZE                 (256)
>> +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT   (240)
>> +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT  (30)
>> +
>> +/* Define hypervisor message types. */
>> +enum hv_message_type {
>> +        HVMSG_NONE                      = 0x00000000,
>> +
>> +        /* Memory access messages. */
>> +        HVMSG_UNMAPPED_GPA              = 0x80000000,
>> +        HVMSG_GPA_INTERCEPT             = 0x80000001,
>> +
>> +        /* Timer notification messages. */
>> +        HVMSG_TIMER_EXPIRED                     = 0x80000010,
>> +
>> +        /* Error messages. */
>> +        HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
>> +        HVMSG_UNRECOVERABLE_EXCEPTION   = 0x80000021,
>> +        HVMSG_UNSUPPORTED_FEATURE               = 0x80000022,
>> +
>> +        /* Trace buffer complete messages. */
>> +        HVMSG_EVENTLOG_BUFFERCOMPLETE   = 0x80000040,
>> +
>> +        /* Platform-specific processor intercept messages. */
>> +        HVMSG_X64_IOPORT_INTERCEPT              = 0x80010000,
>> +        HVMSG_X64_MSR_INTERCEPT         = 0x80010001,
>> +        HVMSG_X64_CPUID_INTERCEPT               = 0x80010002,
>> +        HVMSG_X64_EXCEPTION_INTERCEPT   = 0x80010003,
>> +        HVMSG_X64_APIC_EOI                      = 0x80010004,
>> +        HVMSG_X64_LEGACY_FP_ERROR               = 0x80010005
>> +};
>> +
>> +/* Define synthetic interrupt controller message flags. */
>> +union hv_message_flags {
>> +        uint8_t asu8;
>> +        struct {
>> +                uint8_t msg_pending:1;
>> +                uint8_t reserved:7;
>> +        };
>> +};
>> +
>> +union hv_port_id {
>> +        uint32_t asu32;
>> +        struct {
>> +                uint32_t id:24;
>> +                uint32_t reserved:8;
>> +        } u;
>> +};
>> +
>> +/* Define port type. */
>> +enum hv_port_type {
>> +        HVPORT_MSG      = 1,
>> +        HVPORT_EVENT            = 2,
>> +        HVPORT_MONITOR  = 3
>> +};
>> +
>> +/* Define synthetic interrupt controller message header. */
>> +struct hv_message_header {
>> +        enum hv_message_type message_type;
>> +        uint8_t payload_size;
>> +        union hv_message_flags message_flags;
>> +        uint8_t reserved[2];
>> +        union {
>> +                uint64_t sender;
>> +                union hv_port_id port;
>> +        };
>> +};
>> +
>> +/* Define timer message payload structure. */
>> +struct hv_timer_message_payload {
>> +        uint32_t timer_index;
>> +        uint32_t reserved;
>> +        uint64_t expiration_time;       /* When the timer expired */
>> +        uint64_t delivery_time; /* When the message was delivered */
>> +};
>> +
>> +/* Define synthetic interrupt controller message format. */
>> +struct hv_message {
>> +        struct hv_message_header header;
>> +        union {
>> +                uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
>> +        } u;
>> +};
>> +
>> +/* Define the synthetic interrupt message page layout. */
>> +struct hv_message_page {
>> +        struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
>> +};
>> +
>> +enum {
>> +    HV_TEST_DEV_SINT_ROUTE_CREATE = 1,
>> +    HV_TEST_DEV_SINT_ROUTE_DESTROY,
>> +    HV_TEST_DEV_SINT_ROUTE_SET_SINT
>> +};
>> +
>> +static atomic_t g_cpus_comp_count;
>> +static int g_cpus_count;
>> +static struct spinlock g_synic_alloc_lock;
>> +
>> +static bool synic_supported(void)
>> +{
>> +   return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_SYNIC_AVAILABLE;
>> +}
>> +
>> +static bool stimer_supported(void)
>> +{
>> +    return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_SYNIC_AVAILABLE;
>> +}
>> +
>> +static bool hv_time_ref_counter_supported(void)
>> +{
>> +    return cpuid(HYPERV_CPUID_FEATURES).a & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
>> +}
>> +
>> +static void synic_ctl(u8 ctl, u8 vcpu_id, u8 sint)
>> +{
>> +    outl((ctl << 16)|((vcpu_id) << 8)|sint, 0x3000);
>> +}
>> +
>> +static void synic_sint_create(int sint, int vec, bool auto_eoi)
>> +{
>> +    int vcpu = smp_id();
>> +
>> +    wrmsr(HV_X64_MSR_SINT0 + sint,
>> +          (u64)vec | ((auto_eoi) ? HV_SYNIC_SINT_AUTO_EOI : 0));
>> +    synic_ctl(HV_TEST_DEV_SINT_ROUTE_CREATE, vcpu, sint);
>> +}
>> +
>> +static void synic_sint_destroy(int sint)
>> +{
>> +    int vcpu = smp_id();
>> +
>> +    wrmsr(HV_X64_MSR_SINT0 + sint, 0xFF|HV_SYNIC_SINT_MASKED);
>> +    synic_ctl(HV_TEST_DEV_SINT_ROUTE_DESTROY, vcpu, sint);
>> +}
>> +
>> +struct stimer {
>> +    int sint;
>> +    int index;
>> +    atomic_t fire_count;
>> +};
>> +
>> +struct svcpu {
>> +    int vcpu;
>> +    void *msg_page;
>> +    void *evt_page;
>> +    struct stimer timer[HV_SYNIC_STIMER_COUNT];
>> +};
>> +
>> +static struct svcpu g_synic_vcpu[MAX_CPUS];
>> +
>> +static void *synic_alloc_page(void)
>> +{
>> +    void *page;
>> +
>> +    spin_lock(&g_synic_alloc_lock);
>> +    page = alloc_page();
>> +    spin_unlock(&g_synic_alloc_lock);
>> +    return page;
>> +}
>> +
>> +static void synic_free_page(void *page)
>> +{
>> +    spin_lock(&g_synic_alloc_lock);
>> +    free_page(page);
>> +    spin_unlock(&g_synic_alloc_lock);
>> +}
>> +
>> +static void stimer_init(struct stimer *timer, int index)
>> +{
>> +    memset(timer, 0, sizeof(*timer));
>> +    timer->index = index;
>> +}
>> +
>> +static void synic_enable(void)
>> +{
>> +    int vcpu = smp_id(), i;
>> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
>> +
>> +    memset(svcpu, 0, sizeof(*svcpu));
>> +    svcpu->vcpu = vcpu;
>> +    svcpu->msg_page = synic_alloc_page();
>> +    for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
>> +        stimer_init(&svcpu->timer[i], i);
>> +    }
>> +    wrmsr(HV_X64_MSR_SIMP, (u64)virt_to_phys(svcpu->msg_page) |
>> +            HV_SYNIC_SIMP_ENABLE);
>> +    wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);
>> +}
>> +
>> +static void stimer_shutdown(struct stimer *timer)
>> +{
>> +    wrmsr(HV_X64_MSR_STIMER0_CONFIG + 2*timer->index, 0);
>> +}
>> +
>> +static void process_stimer_expired(struct svcpu *svcpu, struct stimer *timer,
>> +                                   u64 expiration_time, u64 delivery_time)
>> +{
>> +    atomic_inc(&timer->fire_count);
>> +}
>> +
>> +static void process_stimer_msg(struct svcpu *svcpu,
>> +                              struct hv_message *msg, int sint)
>> +{
>> +    struct hv_timer_message_payload *payload =
>> +                        (struct hv_timer_message_payload *)msg->u.payload;
>> +    struct stimer *timer;
>> +
>> +    if (msg->header.message_type != HVMSG_TIMER_EXPIRED &&
>> +        msg->header.message_type != HVMSG_NONE) {
>> +        report("invalid Hyper-V SynIC msg type", false);
>> +        report_summary();
>> +        exit(-1);
>> +        return;
>> +    }
>> +
>> +    if (msg->header.message_type == HVMSG_NONE) {
>> +        return;
>> +    }
>> +
>> +    if (msg->header.payload_size < sizeof(*payload)) {
>> +        report("invalid Hyper-V SynIC msg payload size", false);
>> +        report_summary();
>> +        exit(-1);
>> +        return;
>> +    }
>> +
>> +    /* Now process timer expiration message */
>> +
>> +    if (payload->timer_index >= ARRAY_SIZE(svcpu->timer)) {
>> +        report("invalid Hyper-V SynIC timer index", false);
>> +        report_summary();
>> +        exit(-1);
>> +        return;
>> +    }
>> +    timer = &svcpu->timer[payload->timer_index];
>> +    process_stimer_expired(svcpu, timer, payload->expiration_time,
>> +                          payload->delivery_time);
>> +
>> +    msg->header.message_type = HVMSG_NONE;
>> +    mb();
>> +    if (msg->header.message_flags.msg_pending) {
>> +        wrmsr(HV_X64_MSR_EOM, 0);
>> +    }
>> +}
>> +
>> +static void __stimer_isr(int vcpu)
>> +{
>> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
>> +    struct hv_message_page *msg_page;
>> +    struct hv_message *msg;
>> +    int i;
>> +
>> +
>> +    msg_page = (struct hv_message_page *)svcpu->msg_page;
>> +    for (i = 0; i < ARRAY_SIZE(msg_page->sint_message); i++) {
>> +        msg = &msg_page->sint_message[i];
>> +        process_stimer_msg(svcpu, msg, i);
>> +    }
>> +}
>> +
>> +static void stimer_isr(isr_regs_t *regs)
>> +{
>> +    int vcpu = smp_id();
>> +
>> +    __stimer_isr(vcpu);
>> +    eoi();
>> +}
>> +
>> +static void stimer_isr_auto_eoi(isr_regs_t *regs)
>> +{
>> +    int vcpu = smp_id();
>> +
>> +    __stimer_isr(vcpu);
>> +}
>> +
>> +static void stimer_start(struct stimer *timer,
>> +                         bool auto_enable, bool periodic,
>> +                         u64 tick_100ns, int sint)
>> +{
>> +    u64 config, count;
>> +
>> +    timer->sint = sint;
>> +    atomic_set(&timer->fire_count, 0);
>> +
>> +    config = 0;
>> +    if (periodic) {
>> +        config |= HV_STIMER_PERIODIC;
>> +    }
>> +
>> +    config |= ((u8)(sint & 0xFF)) << 16;
>> +    config |= HV_STIMER_ENABLE;
>> +    if (auto_enable) {
>> +        config |= HV_STIMER_AUTOENABLE;
>> +    }
>> +
>> +    if (periodic) {
>> +        count = tick_100ns;
>> +    } else {
>> +        count = rdmsr(HV_X64_MSR_TIME_REF_COUNT) + tick_100ns;
>> +    }
>> +
>> +    if (!auto_enable) {
>> +        wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
>> +        wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
>> +    } else {
>> +        wrmsr(HV_X64_MSR_STIMER0_CONFIG + timer->index*2, config);
>> +        wrmsr(HV_X64_MSR_STIMER0_COUNT + timer->index*2, count);
>> +    }
>> +}
>> +
>> +static void stimers_shutdown(void)
>> +{
>> +    int vcpu = smp_id(), i;
>> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
>> +
>> +    for (i = 0; i < ARRAY_SIZE(svcpu->timer); i++) {
>> +        stimer_shutdown(&svcpu->timer[i]);
>> +    }
>> +}
>> +
>> +static void synic_disable(void)
>> +{
>> +    int vcpu = smp_id();
>> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
>> +
>> +    wrmsr(HV_X64_MSR_SCONTROL, 0);
>> +    wrmsr(HV_X64_MSR_SIMP, 0);
>> +    wrmsr(HV_X64_MSR_SIEFP, 0);
>> +    synic_free_page(svcpu->msg_page);
>> +}
>> +
>> +static void cpu_comp(void)
>> +{
>> +    atomic_inc(&g_cpus_comp_count);
>> +}
>> +
>> +static void stimer_test_prepare(void *ctx)
>> +{
>> +    write_cr3((ulong)ctx);
>> +    irq_enable();
>> +    synic_enable();
>> +    cpu_comp();
>> +}
>> +
>> +static void stimer_test(void *ctx)
>> +{
>> +    int vcpu = smp_id();
>> +    struct svcpu *svcpu = &g_synic_vcpu[vcpu];
>> +    struct stimer *timer1, *timer2;
>> +    int sint1, sint2;
>> +    u64 one_ms_in_100ns = 10000;
>> +    irq_enable();
>
> Unnecessary.
ok
>
>> +    sint1 = 2;
>> +    sint2 = 3;
>
> Please make these #defines.
ok
>
>> +    synic_sint_create(sint1, 0xF1, false);
>> +    synic_sint_create(sint2, 0xF2, true);
>
> Please move this to stimer_test_prepare...
ok
>
>> +    timer1 = &svcpu->timer[0];
>> +    timer2 = &svcpu->timer[1];
>> +
>> +    /* Check periodic timers */
>> +    stimer_start(timer1, false, true, one_ms_in_100ns, sint1);
>> +    stimer_start(timer2, false, true, one_ms_in_100ns, sint2);
>> +    while ((atomic_read(&timer1->fire_count) < 1000) ||
>> +           (atomic_read(&timer2->fire_count) < 1000)) {
>> +        pause();
>> +    }
>> +    stimer_shutdown(timer1);
>> +    stimer_shutdown(timer2);
>
> ... so that the subtests (between stimer_start and stimer_shutdown) can
> each be a separate callback and a separate report() call.
ok
>
>> +    /* Check one-shot timer */
>> +    stimer_start(timer1, false, false, one_ms_in_100ns, sint1);
>> +    while (atomic_read(&timer1->fire_count) < 1) {
>> +        pause();
>> +    }
>> +    stimer_shutdown(timer1);
>> +
>> +    /* Check auto-enable one-shot timer */
>> +    stimer_start(timer1, true, false, one_ms_in_100ns, sint1);
>> +    while (atomic_read(&timer1->fire_count) < 1) {
>> +        pause();
>> +    }
>> +    stimer_shutdown(timer1);
>> +
>> +    /* Check auto-enable periodic timer */
>> +    stimer_start(timer1, true, true, one_ms_in_100ns, sint1);
>> +    while (atomic_read(&timer1->fire_count) < 1000) {
>> +        pause();
>> +    }
>> +    stimer_shutdown(timer1);
>> +
>> +
>> +    synic_sint_destroy(sint1);
>> +    synic_sint_destroy(sint2);
>
> These go in stimer_test_cleanup.
ok
>
>> +    cpu_comp();
>> +}
>> +
>> +static void stimer_test_cleanup(void *ctx)
>> +{
>> +    irq_enable();
>
> Why enable again?
I'll remove it.
>
>> +    stimers_shutdown();
>> +    synic_disable();
>> +    cpu_comp();
>> +}
>> +
>> +static void on_each_cpu_async_wait(void (*func)(void *ctx), void *ctx)
>> +{
>> +    int i;
>> +
>> +    atomic_set(&g_cpus_comp_count, 0);
>> +    for (i = 0; i < g_cpus_count; i++) {
>> +        on_cpu_async(i, func, ctx);
>> +    }
>> +    while (atomic_read(&g_cpus_comp_count) != g_cpus_count) {
>> +        pause();
>> +    }
>> +}
>> +
>> +static bool stimer_test_all(void)
>> +{
>> +    int ncpus;
>> +
>> +    setup_vm();
>> +    smp_init();
>> +    setup_idt();
>> +    enable_apic();
>> +
>> +    handle_irq(0xF1, stimer_isr);
>> +    handle_irq(0xF2, stimer_isr_auto_eoi);
>> +
>> +    ncpus = cpu_count();
>> +    if (ncpus > MAX_CPUS) {
>> +        ncpus = MAX_CPUS;
>> +    }
>> +    printf("cpus = %d\n", ncpus);
>> +    g_cpus_count = ncpus;
>> +
>> +    on_each_cpu_async_wait(stimer_test_prepare, (void *)read_cr3());
>> +    on_each_cpu_async_wait(stimer_test, NULL);
>> +    on_each_cpu_async_wait(stimer_test_cleanup, NULL);
>> +
>> +    return true;
>> +}
>> +
>> +int main(int ac, char **av)
>> +{
>> +
>> +    if (!synic_supported()) {
>> +        report("Hyper-V SynIC is not supported", true);
>> +        goto done;
>> +    }
>> +
>> +    if (!stimer_supported()) {
>> +        report("Hyper-V SynIC timers are not supported", true);
>> +        goto done;
>> +    }
>> +
>> +    if (!hv_time_ref_counter_supported()) {
>> +        report("Hyper-V time reference counter is not supported", true);
>> +        goto done;
>> +    }
>> +    report("Hyper-V SynIC timers test", stimer_test_all());
>
> You can move report() in stimer_test_all(), since you'll have many of them.
ok, will do
>
> Thanks,
>
> Paolo
>
>> +done:
>> +    return report_summary();
>> +}
>> diff --git a/x86/unittests.cfg b/x86/unittests.cfg
>> index ffffc15..dc93451 100644
>> --- a/x86/unittests.cfg
>> +++ b/x86/unittests.cfg
>> @@ -183,3 +183,8 @@ arch = x86_64
>>   file = hyperv_synic.flat
>>   smp = 2
>>   extra_params = -cpu kvm64,hv_synic -device hyperv-testdev
>> +
>> +[hyperv_stimer]
>> +file = hyperv_stimer.flat
>> +smp = 2
>> +extra_params = -cpu host,hv_time,hv_synic,hv_stimer -device hyperv-testdev
>>
Paolo Bonzini Nov. 27, 2015, 12:12 p.m. UTC | #3
On 27/11/2015 12:30, Andrey Smetanin wrote:
>>>
>>> +
>>> +static void stimer_test_cleanup(void *ctx)
>>> +{
>>> +    irq_enable();
>>
>> Why enable again?
> I'll remove it.

I guess you can remove the one in stimer_test_prepare too.  If the
interrupts are disabled you don't get the IPI either, do you?

Paolo