From patchwork Fri Aug 27 05:49:53 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Jason Wang
X-Patchwork-Id: 136681
Subject: [PATCH kvm-unit-test 6/6] Add a test for kvm-clock
To: mtosatti@redhat.com, avi@redhat.com, kvm@vger.kernel.org
From: Jason Wang
Cc: glommer@redhat.com
Date: Fri, 27 Aug 2010 13:49:53 +0800
Message-ID: <20100827054953.7409.25948.stgit@FreeLancer>
In-Reply-To: <20100827054733.7409.63882.stgit@FreeLancer>
References: <20100827054733.7409.63882.stgit@FreeLancer>
User-Agent: StGit/0.15
X-Mailing-List: kvm@vger.kernel.org

diff --git a/config-x86-common.mak b/config-x86-common.mak
index b8ca859..b541c1c 100644
--- a/config-x86-common.mak
+++ b/config-x86-common.mak
@@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
 tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
                $(TEST_DIR)/smptest.flat $(TEST_DIR)/port80.flat \
                $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
-               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+               $(TEST_DIR)/kvmclock_test.flat
 
 tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
 
@@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
 
 $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
 
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+                                $(TEST_DIR)/kvmclock_test.o
+
 arch_clean:
 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
 	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o

diff --git a/x86/README b/x86/README
index ab5a2ae..4b90080 100644
--- a/x86/README
+++ b/x86/README
@@ -12,3 +12,5 @@ sieve: heavy memory access with no paging and with paging static and with paging
 smptest: run smp_id() on every cpu and compares return value to number
 tsc: write to tsc(0) and write to tsc(100000000000) and read it back
 vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
+kvmclock_test: monotonic cycle test of kvmclock and a sanity test of
+wallclock

diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
new file mode 100644
index 0000000..cd80915
--- /dev/null
+++ b/x86/kvmclock_test.c
@@ -0,0 +1,145 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
+#include "string.h"
"string.h" +#include "kvmclock.h" + +#define DEFAULT_TEST_LOOPS 100000000L +#define DEFAULT_THRESHOLD 60L + +struct test_info { + struct spinlock lock; + long loops; /* test loops */ + u64 warps; /* warp count */ + long long worst; /* worst warp */ + volatile cycle_t last; /* last cycle seen by test */ + atomic_t ncpus; /* number of cpu in the test*/ +}; + +struct test_info ti[2]; + +static int wallclock_test(long sec, long threshold) +{ + int i; + long ksec, offset; + struct timespec ts, ts_last; + + printf("Wallclock test, threshold %ld\n", threshold); + kvm_get_wallclock(&ts_last); + ksec = ts_last.sec + ts_last.nsec / NSEC_PER_SEC; + + offset = ksec - sec; + printf("Seconds get from host: %ld\n", sec); + printf("Seconds get from kvmclock: %ld\n", ksec); + + if (offset > threshold || offset < -threshold) { + printf("Seconds get from kvmclock: %ld\n", ksec); + return 1; + } + + for (i=0; i < 100; i++){ + kvm_get_wallclock(&ts); + if (ts.nsec != ts_last.nsec || ts.sec != ts_last.sec){ + printf ("Inconsistent wall clock returned!\n"); + return 1; + } + } + return 0; +} + +static void kvm_clock_test(void *data) +{ + struct test_info *hv_test_info = (struct test_info *)data; + int i; + + for (i = 0; i < hv_test_info->loops; i++){ + cycle_t t0, t1; + long long delta; + + spin_lock(&hv_test_info->lock); + t1 = kvm_clock_read(); + t0 = hv_test_info->last; + hv_test_info->last = kvm_clock_read(); + spin_unlock(&hv_test_info->lock); + + delta = t1 - t0; + if (delta < 0){ + spin_lock(&hv_test_info->lock); + ++hv_test_info->warps; + if (delta < hv_test_info->worst){ + hv_test_info->worst = delta; + printf("Worst warp %lld %\n", hv_test_info->worst); + } + spin_unlock(&hv_test_info->lock); + } + + if (!((unsigned long)i & 31)) + asm volatile("rep; nop"); + } + + atomic_dec(&hv_test_info->ncpus); +} + +static int cycle_test(int ncpus, long loops, struct test_info *ti) +{ + int i; + + atomic_set(&ti->ncpus, ncpus); + ti->loops = loops; + for (i = ncpus - 1; i >= 0; i--) + on_cpu_async(i, kvm_clock_test, (void *)ti); + + /* Wait for the end of other vcpu */ + while(atomic_read(&ti->ncpus)) + ; + + printf("Total vcpus: %d\n", ncpus); + printf("Test loops: %ld\n", ti->loops); + printf("Total warps: %lld\n", ti->warps); + printf("Worst warp: %lld\n", ti->worst); + + return ti->warps ? 1 : 0; +} + +int main(int ac, char **av) +{ + int ncpus = cpu_count(); + int nerr = 0, i; + long loops = DEFAULT_TEST_LOOPS; + long sec = 0; + long threshold = DEFAULT_THRESHOLD; + + if (ac > 1) + loops = atol(av[1]); + if (ac > 2) + sec = atol(av[2]); + if (ac > 3) + threshold = atol(av[3]); + + smp_init(); + + if (ncpus > MAX_CPU) + ncpus = MAX_CPU; + for (i = 0; i < ncpus; ++i) + on_cpu(i, kvm_clock_init, (void *)0); + + if (ac > 2) + nerr += wallclock_test(sec, threshold); + + printf("Check the stability of raw cycle\n"); + pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT + | PVCLOCK_RAW_CYCLE_BIT); + if (cycle_test(ncpus, loops, &ti[1])) + printf("Raw cycle is not stable\n"); + else + printf("Raw cycle is stable\n"); + + pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); + printf("Monotonic cycle test:\n"); + nerr += cycle_test(ncpus, loops, &ti[0]); + + for (i = 0; i < ncpus; ++i) + on_cpu(i, kvm_clock_clear, (void *)0); + + return nerr > 0 ? 
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 7796e41..a3290cd 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -63,3 +63,8 @@ extra_params = -enable-nesting -cpu qemu64,+svm
 file = svm.flat
 smp = 2
 extra_params = -cpu qemu64,-svm
+
+[kvmclock_test]
+file = kvmclock_test.flat
+smp = 2
+extra_params = --append "10000000 `date +%s`"
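
A note on the warp check in kvm_clock_test(): cycle_t is an unsigned 64-bit
quantity, so the test relies on the unsigned difference t1 - t0 turning into a
small negative number once it is stored in a signed long long whenever the
later reading falls behind the earlier one. Below is a minimal standalone
sketch of that property; the values are hypothetical, it is plain hosted C
rather than the libcflat environment, and it assumes the usual two's-complement
targets this test runs on.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cycle_t;               /* stand-in for the kvmclock cycle type */

int main(void)
{
        cycle_t t0 = 1000050;           /* previous reading (hypothetical) */
        cycle_t t1 = 1000000;           /* new reading that warped back by 50 */
        long long delta = t1 - t0;      /* unsigned wrap-around, reinterpreted as signed */

        if (delta < 0)                  /* same condition kvm_clock_test() uses */
                printf("warp detected: %lld cycles\n", delta);   /* prints -50 */
        return 0;
}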
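
About the unittests.cfg entry and the flow in main(): the appended string
"10000000 `date +%s`" becomes the test's command line, so av[1] (10000000)
overrides DEFAULT_TEST_LOOPS and av[2] (the host's epoch seconds captured at
launch) is the reference value wallclock_test() compares the guest's kvmclock
wallclock against, within DEFAULT_THRESHOLD (60 seconds) unless a third
argument overrides it; the wallclock test is skipped entirely when no second
argument is given. The first cycle_test() pass, run with PVCLOCK_RAW_CYCLE_BIT
set alongside PVCLOCK_TSC_STABLE_BIT, only reports whether the raw cycle
readings happen to be stable; only the second pass, with just
PVCLOCK_TSC_STABLE_BIT, contributes to nerr and hence to the test's exit
status.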