From patchwork Tue Aug 31 08:37:49 2010
X-Patchwork-Submitter: Jason Wang
X-Patchwork-Id: 144761
Subject: [PATCH kvm-unit-tests v2 8/8] Add tests for kvm-clock
From: Jason Wang
To: zamsden@redhat.com, glommer@redhat.com, mtosatti@redhat.com,
    avi@redhat.com, kvm@vger.kernel.org
Date: Tue, 31 Aug 2010 16:37:49 +0800
Message-ID: <20100831083749.10672.68826.stgit@FreeLancer>
In-Reply-To: <20100831083216.10672.20413.stgit@FreeLancer>
References: <20100831083216.10672.20413.stgit@FreeLancer>
User-Agent: StGit/0.15

diff --git a/config-x86-common.mak b/config-x86-common.mak
index b8ca859..b541c1c 100644
--- a/config-x86-common.mak
+++ b/config-x86-common.mak
@@ -26,7 +26,8 @@ FLATLIBS = lib/libcflat.a $(libgcc)
 tests-common = $(TEST_DIR)/vmexit.flat $(TEST_DIR)/tsc.flat \
                $(TEST_DIR)/smptest.flat $(TEST_DIR)/port80.flat \
                $(TEST_DIR)/realmode.flat $(TEST_DIR)/msr.flat \
-               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat
+               $(TEST_DIR)/hypercall.flat $(TEST_DIR)/sieve.flat \
+               $(TEST_DIR)/kvmclock_test.flat
 
 tests_and_config = $(TEST_DIR)/*.flat $(TEST_DIR)/unittests.cfg
 
@@ -70,6 +71,9 @@ $(TEST_DIR)/rmap_chain.flat: $(cstart.o) $(TEST_DIR)/rmap_chain.o \
 
 $(TEST_DIR)/svm.flat: $(cstart.o) $(TEST_DIR)/vm.o
 
+$(TEST_DIR)/kvmclock_test.flat: $(cstart.o) $(TEST_DIR)/kvmclock.o \
+                                $(TEST_DIR)/kvmclock_test.o
+
 arch_clean:
 	$(RM) $(TEST_DIR)/*.o $(TEST_DIR)/*.flat \
 	$(TEST_DIR)/.*.d $(TEST_DIR)/lib/.*.d $(TEST_DIR)/lib/*.o
diff --git a/x86/README b/x86/README
index ab5a2ae..215fb2f 100644
--- a/x86/README
+++ b/x86/README
@@ -12,3 +12,4 @@ sieve: heavy memory access with no paging and with paging static and with paging
 smptest: run smp_id() on every cpu and compares return value to number
 tsc: write to tsc(0) and write to tsc(100000000000) and read it back
 vmexit: long loops for each: cpuid, vmcall, mov_from_cr8, mov_to_cr8, inl_pmtimer, ipi, ipi+halt
+kvmclock_test: test of wallclock, monotonic cycle and performance of kvmclock
diff --git a/x86/kvmclock_test.c b/x86/kvmclock_test.c
new file mode 100644
index 0000000..5b14ae2
--- /dev/null
+++ b/x86/kvmclock_test.c
@@ -0,0 +1,166 @@
+#include "libcflat.h"
+#include "smp.h"
+#include "atomic.h"
"atomic.h" +#include "processor.h" +#include "kvmclock.h" + +#define DEFAULT_TEST_LOOPS 100000000L +#define DEFAULT_THRESHOLD 5L + +struct test_info { + struct spinlock lock; + long loops; /* test loops */ + u64 warps; /* warp count */ + u64 stalls; /* stall count */ + long long worst; /* worst warp */ + volatile cycle_t last; /* last cycle seen by test */ + atomic_t ncpus; /* number of cpu in the test*/ + int check; /* check cycle ? */ +}; + +struct test_info ti[4]; + +static int wallclock_test(long sec, long threshold) +{ + long ksec, offset; + struct timespec ts; + + printf("Wallclock test, threshold %ld\n", threshold); + kvm_get_wallclock(&ts); + ksec = ts.tv_sec; + + offset = ksec - sec; + printf("Seconds get from host: %ld\n", sec); + printf("Seconds get from kvmclock: %ld\n", ksec); + printf("Offset: %ld\n", offset); + + if (offset > threshold || offset < -threshold) { + printf("offset too large!\n"); + return 1; + } + + return 0; +} + +static void kvm_clock_test(void *data) +{ + struct test_info *hv_test_info = (struct test_info *)data; + long i, check = hv_test_info->check; + + for (i = 0; i < hv_test_info->loops; i++){ + cycle_t t0, t1; + long long delta; + + if (check == 0) { + kvm_clock_read(); + continue; + } + + spin_lock(&hv_test_info->lock); + t1 = kvm_clock_read(); + t0 = hv_test_info->last; + hv_test_info->last = kvm_clock_read(); + spin_unlock(&hv_test_info->lock); + + delta = t1 - t0; + if (delta < 0) { + spin_lock(&hv_test_info->lock); + ++hv_test_info->warps; + if (delta < hv_test_info->worst){ + hv_test_info->worst = delta; + printf("Worst warp %lld %\n", hv_test_info->worst); + } + spin_unlock(&hv_test_info->lock); + } + if (delta == 0) + ++hv_test_info->stalls; + + if (!((unsigned long)i & 31)) + asm volatile("rep; nop"); + } + + atomic_dec(&hv_test_info->ncpus); +} + +static int cycle_test(int ncpus, long loops, int check, struct test_info *ti) +{ + int i; + unsigned long long begin, end; + + begin = rdtsc(); + + atomic_set(&ti->ncpus, ncpus); + ti->loops = loops; + ti->check = check; + for (i = ncpus - 1; i >= 0; i--) + on_cpu_async(i, kvm_clock_test, (void *)ti); + + /* Wait for the end of other vcpu */ + while(atomic_read(&ti->ncpus)) + ; + + end = rdtsc(); + + printf("Total vcpus: %d\n", ncpus); + printf("Test loops: %ld\n", ti->loops); + if (check == 1) { + printf("Total warps: %lld\n", ti->warps); + printf("Total stalls: %lld\n", ti->stalls); + printf("Worst warp: %lld\n", ti->worst); + } else + printf("TSC cycles: %lld\n", end - begin); + + return ti->warps ? 
+
+int main(int ac, char **av)
+{
+        int ncpus = cpu_count();
+        int nerr = 0, i;
+        long loops = DEFAULT_TEST_LOOPS;
+        long sec = 0;
+        long threshold = DEFAULT_THRESHOLD;
+
+        if (ac > 1)
+                loops = atol(av[1]);
+        if (ac > 2)
+                sec = atol(av[2]);
+        if (ac > 3)
+                threshold = atol(av[3]);
+
+        smp_init();
+
+        if (ncpus > MAX_CPU)
+                ncpus = MAX_CPU;
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_init, (void *)0);
+
+        if (ac > 2)
+                nerr += wallclock_test(sec, threshold);
+
+        printf("Check the stability of raw cycle ...\n");
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
+                          | PVCLOCK_RAW_CYCLE_BIT);
+        if (cycle_test(ncpus, loops, 1, &ti[0]))
+                printf("Raw cycle is not stable\n");
+        else
+                printf("Raw cycle is stable\n");
+
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+        printf("Monotonic cycle test:\n");
+        nerr += cycle_test(ncpus, loops, 1, &ti[1]);
+
+        printf("Measure the performance of raw cycle ...\n");
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT
+                          | PVCLOCK_RAW_CYCLE_BIT);
+        cycle_test(ncpus, loops, 0, &ti[2]);
+
+        printf("Measure the performance of adjusted cycle ...\n");
+        pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+        cycle_test(ncpus, loops, 0, &ti[3]);
+
+        for (i = 0; i < ncpus; ++i)
+                on_cpu(i, kvm_clock_clear, (void *)0);
+
+        return nerr > 0 ? 1 : 0;
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index 7796e41..228ac1d 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -63,3 +63,8 @@ extra_params = -enable-nesting -cpu qemu64,+svm
 file = svm.flat
 smp = 2
 extra_params = -cpu qemu64,-svm
+
+[kvmclock_test]
+file = kvmclock_test.flat
+smp = 2
+extra_params = --append "10000000 `date +%s`"
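
Note on running the test by hand: main() parses av[1] as the number of test
loops, av[2] as the host's wall-clock time in seconds (the wallclock test only
runs when this is supplied), and av[3] as the allowed offset in seconds
(default 5). A rough sketch of a standalone invocation follows; the "x86-run"
wrapper name and its exact flags are an assumption and may differ from the
runner that consumes unittests.cfg in this tree:

  # Sketch only: runner name and flags are assumptions, not part of this patch.
  # 10000000    -> av[1], test loops
  # `date +%s`  -> av[2], host seconds, enables the wallclock test
  # 5           -> av[3], allowed wallclock offset in seconds (optional)
  ./x86-run x86/kvmclock_test.flat -smp 2 --append "10000000 `date +%s` 5"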