@@ -340,8 +340,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
struct kvm_dirty_gfn *cur;
uint32_t count = 0;
 
- dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
-
while (true) {
cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
if (!dirty_gfn_is_dirtied(cur))
@@ -360,17 +358,11 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
return count;
}
 
-static void dirty_ring_continue_vcpu(void)
-{
- pr_info("Notifying vcpu to continue\n");
- sem_post(&sem_vcpu_cont);
-}
-
static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *ring_buf_idx)
{
- uint32_t count = 0, cleared;
+ uint32_t count, cleared;
 
/* Only have one vcpu */
count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -385,9 +377,6 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
*/
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
"with collected (%u)", cleared, count);
-
- if (READ_ONCE(dirty_ring_vcpu_ring_full))
- dirty_ring_continue_vcpu();
}
 
static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
@@ -404,7 +393,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
sem_post(&sem_vcpu_stop);
pr_info("Dirty ring full, waiting for it to be collected\n");
sem_wait(&sem_vcpu_cont);
- pr_info("vcpu continues now.\n");
WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
@@ -755,11 +743,65 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
 
while (iteration < p->iterations) {
+ bool saw_dirty_ring_full = false;
+ unsigned long i;
+
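+ /*
+ * Snapshot the last page dirtied in the previous iteration here,
+ * once per iteration, as the ring may now be collected multiple
+ * times within a single iteration.
+ */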
+ dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
+
/* Give the vcpu thread some time to dirty some pages */
- usleep(p->interval * 1000);
- log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
- bmap, host_num_pages,
- &ring_buf_idx);
+ for (i = 0; i < p->interval; i++) {
+ usleep(1000);
+
+ /*
+ * Reap dirty pages while the guest is running so that
+ * dirty ring full events are resolved, i.e. so that a
+ * larger interval doesn't always end up with a vCPU
+ * that's effectively blocked. Collecting while the
+ * guest is running also verifies KVM doesn't lose any
+ * state.
+ *
+ * For bitmap modes, KVM overwrites the entire bitmap,
+ * i.e. collecting the bitmaps is destructive. Collect
+ * the bitmap only on the first pass, otherwise this
+ * test would lose track of dirty pages.
+ */
+ if (i && host_log_mode != LOG_MODE_DIRTY_RING)
+ continue;
+
+ /*
+ * For the dirty ring, empty the ring on subsequent
+ * passes only if the ring was filled at least once,
+ * to verify KVM's handling of a full ring (emptying
+ * the ring on every pass would make it unlikely the
+ * vCPU would ever fill the ring).
+ */
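+ /*
+ * Latch ring-full events, as the vCPU thread clears
+ * dirty_ring_vcpu_ring_full when it is allowed to continue.
+ */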
+ if (READ_ONCE(dirty_ring_vcpu_ring_full))
+ saw_dirty_ring_full = true;
+ if (i && !saw_dirty_ring_full)
+ continue;
+
+ log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
+ bmap, host_num_pages,
+ &ring_buf_idx);
+
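+ /*
+ * On a full ring, the vCPU posts sem_vcpu_stop and blocks on
+ * sem_vcpu_cont; the ring has now been drained, so let the
+ * vCPU resume.
+ */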
+ if (READ_ONCE(dirty_ring_vcpu_ring_full)) {
+ pr_info("Dirty ring emptied, restarting vCPU\n");
+ sem_post(&sem_vcpu_cont);
+ }
+ }
 
/*
* See vcpu_sync_stop_requested definition for details on why