[14/17] async: add debugging assertions for record/replay in bh APIs

Message ID 20241220104220.2007786-15-npiggin@gmail.com
State New
Series replay: Fixes and avocado test updates

Commit Message

Nicholas Piggin Dec. 20, 2024, 10:42 a.m. UTC
Code using the old bh APIs must be updated to say whether the bh
relates to the target machine model or to host QEMU operation, in
order for record/replay to work properly.
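
For example (an illustrative sketch, not taken from this patch): a bh
that affects the machine model would move from qemu_bh_schedule() to
qemu_bh_schedule_event() with an explicit clock type, while a
host-only bh would use the _noreplay variant added in util/async.c
below:

    /* Guest-visible bh: intercepted by record/replay */
    qemu_bh_schedule_event(bh, QEMU_CLOCK_VIRTUAL);

    /* Host-only bh: bypasses record/replay */
    qemu_bh_schedule_event_noreplay(bh);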

Add some assertions in the old APIs to catch unconverted code when
record/replay is active.
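
The oneshot API follows the same pattern; the signature of
aio_bh_schedule_oneshot_event() is assumed here to mirror
aio_bh_schedule_oneshot_full() with an explicit clock type:

    /* Before: clock context unknown, now asserts under record/replay */
    aio_bh_schedule_oneshot(ctx, cb, opaque);

    /* After (assumed signature): clock context stated by the caller */
    aio_bh_schedule_oneshot_event(ctx, cb, opaque, QEMU_CLOCK_VIRTUAL);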

This caught the IDE bug when running the replay_linux.py avocado
test with the x86-64 q35 non-virtio machine.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 include/block/aio.h    |  2 +-
 replay/replay-events.c | 20 ++++++++------------
 util/async.c           | 29 +++++++++++++++++++++++++++++
 3 files changed, 38 insertions(+), 13 deletions(-)

Patch

diff --git a/include/block/aio.h b/include/block/aio.h
index 26859bd0b93..991aaae707d 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -309,7 +309,7 @@  void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
  */
 #define aio_bh_schedule_oneshot(ctx, cb, opaque) \
     aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)), \
-                                 QEMU_CLOCK_REALTIME)
+                                 QEMU_CLOCK_MAX)
 
 /**
  * aio_bh_new_full: Allocate a new bottom half structure.
diff --git a/replay/replay-events.c b/replay/replay-events.c
index 6a7c27cac1e..0b3dbfd46b9 100644
--- a/replay/replay-events.c
+++ b/replay/replay-events.c
@@ -123,23 +123,19 @@  void replay_add_event(ReplayAsyncEventKind event_kind,
 
 void replay_bh_schedule_event(QEMUBH *bh)
 {
-    if (events_enabled) {
-        uint64_t id = replay_get_current_icount();
-        replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
-    } else {
-        qemu_bh_schedule(bh);
-    }
+    uint64_t id;
+    g_assert(events_enabled);
+    id = replay_get_current_icount();
+    replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id);
 }
 
 void replay_bh_oneshot_event(AioContext *ctx,
     QEMUBHFunc *cb, void *opaque)
 {
-    if (events_enabled) {
-        uint64_t id = replay_get_current_icount();
-        replay_add_event(REPLAY_ASYNC_EVENT_BH_ONESHOT, cb, opaque, id);
-    } else {
-        aio_bh_schedule_oneshot(ctx, cb, opaque);
-    }
+    uint64_t id;
+    g_assert(events_enabled);
+    id = replay_get_current_icount();
+    replay_add_event(REPLAY_ASYNC_EVENT_BH_ONESHOT, cb, opaque, id);
 }
 
 void replay_add_input_event(struct InputEvent *event)
diff --git a/util/async.c b/util/async.c
index 5d2c76dec08..72a9eccffbe 100644
--- a/util/async.c
+++ b/util/async.c
@@ -58,6 +58,9 @@  enum {
 
     /* Schedule periodically when the event loop is idle */
     BH_IDLE      = (1 << 4),
+
+    /* BH being handled by replay machinery */
+    BH_REPLAY    = (1 << 5),
 };
 
 struct QEMUBH {
@@ -145,6 +148,17 @@  void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                   void *opaque, const char *name,
                                   QEMUClockType clock_type)
 {
+    if (clock_type == QEMU_CLOCK_MAX) {
+        /*
+         * aio_bh_schedule_oneshot() passes QEMU_CLOCK_MAX to say that it
+         * does not know which clock context to use, so it cannot work
+         * under record/replay. Callers should be converted to
+         * aio_bh_schedule_oneshot_event(); then this check can be removed.
+         */
+        g_assert(replay_mode == REPLAY_MODE_NONE);
+        clock_type = QEMU_CLOCK_REALTIME;
+    }
+
     switch (clock_type) {
     case QEMU_CLOCK_VIRTUAL:
     case QEMU_CLOCK_VIRTUAL_RT:
@@ -178,6 +192,12 @@  void aio_bh_call(QEMUBH *bh)
 {
     bool last_engaged_in_io = false;
 
+    if (bh->flags & BH_REPLAY) {
+        g_assert(!(bh->flags & BH_SCHEDULED));
+        g_assert(!(bh->flags & BH_DELETED));
+        g_assert(!(bh->flags & BH_PENDING));
+        bh->flags &= ~BH_REPLAY;
+    }
     /* Make a copy of the guard-pointer as cb may free the bh */
     MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
     if (reentrancy_guard) {
@@ -252,6 +272,7 @@  void qemu_bh_schedule_event(QEMUBH *bh, QEMUClockType clock_type)
     case QEMU_CLOCK_VIRTUAL_RT:
         if (replay_mode != REPLAY_MODE_NONE) {
             /* Record/replay must intercept bh events */
+            qatomic_fetch_or(&bh->flags, BH_REPLAY);
             replay_bh_schedule_event(bh);
             break;
         }
@@ -268,11 +289,15 @@  void qemu_bh_schedule_event_noreplay(QEMUBH *bh)
 
 void qemu_bh_schedule_idle(QEMUBH *bh)
 {
+    /* No mechanism for scheduling an idle bh under replay at the moment */
+    g_assert(replay_mode == REPLAY_MODE_NONE);
     aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
+    /* Callers should be converted to use qemu_bh_schedule_event */
+    g_assert(replay_mode == REPLAY_MODE_NONE);
     aio_bh_enqueue(bh, BH_SCHEDULED);
 }
 
@@ -280,6 +305,8 @@  void qemu_bh_schedule(QEMUBH *bh)
  */
 void qemu_bh_cancel(QEMUBH *bh)
 {
+    /* No mechanism for canceling a replay-scheduled bh at the moment */
+    g_assert(!(bh->flags & BH_REPLAY));
     qatomic_and(&bh->flags, ~BH_SCHEDULED);
 }
 
@@ -288,6 +315,8 @@  void qemu_bh_cancel(QEMUBH *bh)
  */
 void qemu_bh_delete(QEMUBH *bh)
 {
+    /* No mechanism for deleting a replay-scheduled bh at the moment */
+    g_assert(!(bh->flags & BH_REPLAY));
     aio_bh_enqueue(bh, BH_DELETED);
 }