@@ -1174,7 +1174,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
if (blk->quiesce_counter && !blk->disable_request_queuing) {
blk_dec_in_flight(blk);
- qemu_co_queue_wait(&blk->queued_requests, NULL);
+ qemu_co_queue_wait(&blk->queued_requests, QLNULL);
blk_inc_in_flight(blk);
}
}
@@ -2367,7 +2367,7 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
if (blk->dev_ops && blk->dev_ops->drained_end) {
blk->dev_ops->drained_end(blk->dev_opaque);
}
- while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+ while (qemu_co_enter_next(&blk->queued_requests, QLNULL)) {
/* Resume all queued requests */
}
}
@@ -120,7 +120,7 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
return false;
}
- qemu_co_queue_wait(&task->wait_queue, NULL);
+ qemu_co_queue_wait(&task->wait_queue, QLNULL);
return true;
}
@@ -28,6 +28,7 @@
#define MAX_IO_BYTES (1 << 20) /* 1 Mb */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
+
/* The mirroring buffer is a list of granularity-sized chunks.
* Free chunks are organized in a list.
*/
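As an aside on the comment above: a rough standalone sketch of a buffer carved into granularity-sized chunks, with the free chunks threaded onto a singly linked list, could look like the code below. The names (ChunkPool, chunk_pool_get, ...) and the use of <sys/queue.h> are illustrative guesses, not the structs the mirror code actually uses.

    /* Illustrative only: a pool of granularity-sized chunks with a free list. */
    #include <assert.h>
    #include <stdlib.h>
    #include <sys/queue.h>                /* BSD SLIST_* macros (glibc ships them too) */

    typedef struct Chunk {
        SLIST_ENTRY(Chunk) next;          /* link used only while the chunk is free */
    } Chunk;

    typedef struct ChunkPool {
        void *buf;                        /* one contiguous allocation */
        size_t granularity;               /* fixed size of every chunk */
        SLIST_HEAD(, Chunk) free_chunks;  /* chunks not currently handed out */
    } ChunkPool;

    static int chunk_pool_init(ChunkPool *p, size_t granularity, size_t nchunks)
    {
        assert(granularity >= sizeof(Chunk));
        p->buf = malloc(granularity * nchunks);
        if (!p->buf) {
            return -1;
        }
        p->granularity = granularity;
        SLIST_INIT(&p->free_chunks);
        for (size_t i = 0; i < nchunks; i++) {
            /* The first bytes of each free chunk double as its list link. */
            Chunk *c = (Chunk *)((char *)p->buf + i * granularity);
            SLIST_INSERT_HEAD(&p->free_chunks, c, next);
        }
        return 0;
    }

    static void *chunk_pool_get(ChunkPool *p)
    {
        Chunk *c = SLIST_FIRST(&p->free_chunks);
        if (c) {
            SLIST_REMOVE_HEAD(&p->free_chunks, next);
        }
        return c;                         /* NULL when every chunk is in use */
    }

    static void chunk_pool_put(ChunkPool *p, void *chunk)
    {
        SLIST_INSERT_HEAD(&p->free_chunks, (Chunk *)chunk, next);
    }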
@@ -157,7 +158,7 @@ static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
if (ranges_overlap(self_start_chunk, self_nb_chunks,
op_start_chunk, op_nb_chunks))
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
break;
}
}
@@ -297,7 +298,7 @@ mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
if (!op->is_pseudo_op && op->is_in_flight &&
op->is_active_write == active)
{
- qemu_co_queue_wait(&op->waiting_requests, NULL);
+ qemu_co_queue_wait(&op->waiting_requests, QLNULL);
return;
}
}
@@ -22,13 +22,13 @@
static void fsdev_throttle_read_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[false], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[false], QLNULL);
}
static void fsdev_throttle_write_timer_cb(void *opaque)
{
FsThrottle *fst = opaque;
- qemu_co_enter_next(&fst->throttled_reqs[true], NULL);
+ qemu_co_enter_next(&fst->throttled_reqs[true], QLNULL);
}
int fsdev_throttle_parse_opts(QemuOpts *opts, FsThrottle *fst, Error **errp)
@@ -100,7 +100,7 @@ void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
if (throttle_enabled(&fst->cfg)) {
if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
- qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
+ qemu_co_queue_wait(&fst->throttled_reqs[is_write], QLNULL);
}
throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
@@ -2888,7 +2888,7 @@ static void coroutine_fn v9fs_flush(void *opaque)
/*
* Wait for pdu to complete.
*/
- qemu_co_queue_wait(&cancel_pdu->complete, NULL);
+ qemu_co_queue_wait(&cancel_pdu->complete, QLNULL);
if (!qemu_co_queue_next(&cancel_pdu->complete)) {
cancel_pdu->cancelled = 0;
pdu_free(cancel_pdu);
@@ -24,6 +24,9 @@ struct QemuLockable {
QemuLockUnlockFunc *unlock;
};
+/* A QemuLockable * null pointer, for callers that hold no lock */
+#define QLNULL ((QemuLockable *)0)
+
/* This function gives an error if an invalid, non-NULL pointer type is passed
* to QEMU_MAKE_LOCKABLE. For optimized builds, we can rely on dead-code elimination
* from the compiler, and give the errors already at link time.
*/
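One way to read the QLNULL definition above (my inference; the patch itself does not spell it out): the lockable macros dispatch on the static type of their argument via _Generic, and a bare NULL may expand to plain 0 or to (void *)0 depending on the toolchain, so it does not reliably select a QemuLockable * association, while a typed null such as ((QemuLockable *)0) always does. A tiny standalone C11 illustration of that _Generic behavior; LOCK_KIND is invented for this example and is not anything from lockable.h:

    #include <stdio.h>

    typedef struct QemuLockable QemuLockable;      /* opaque here; the real struct is above */

    #define LOCK_KIND(x)                           \
        _Generic((x),                              \
                 QemuLockable *: "QemuLockable *", \
                 void *:         "void *",         \
                 default:        "something else")

    int main(void)
    {
        /* What QLNULL expands to: a null pointer with a definite type. */
        QemuLockable *ql = (QemuLockable *)0;

        printf("%s\n", LOCK_KIND(ql));          /* prints: QemuLockable * */
        printf("%s\n", LOCK_KIND((void *)0));   /* prints: void *         */
        printf("%s\n", LOCK_KIND(0));           /* prints: something else */
        return 0;
    }

With a dedicated constant, every call site above keeps a single, explicitly typed spelling for "no lock held".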
@@ -64,7 +64,7 @@ void coroutine_fn co_get_from_shres(SharedResource *s, uint64_t n)
{
assert(n <= s->total);
while (!co_try_get_from_shres(s, n)) {
- qemu_co_queue_wait(&s->queue, NULL);
+ qemu_co_queue_wait(&s->queue, QLNULL);
}
}