@@ -793,15 +793,15 @@ static void block_job_do_yield(BlockJob *job, uint64_t ns)
assert(job->busy);
}
-void coroutine_fn block_job_pause_point(BlockJob *job)
+int coroutine_fn block_job_pause_point(BlockJob *job)
{
assert(job && block_job_started(job));
- if (!block_job_should_pause(job)) {
- return;
- }
if (block_job_is_cancelled(job)) {
- return;
+ return -ECANCELED;
+ }
+ if (!block_job_should_pause(job)) {
+ return 0;
}
if (job->driver->pause) {
@@ -817,6 +817,8 @@ void coroutine_fn block_job_pause_point(BlockJob *job)
if (job->driver->resume) {
job->driver->resume(job);
}
+
+ return 0;
}
void block_job_resume_all(void)
@@ -874,20 +876,20 @@ bool block_job_is_cancelled(BlockJob *job)
return job->cancelled;
}
-void block_job_sleep_ns(BlockJob *job, int64_t ns)
+int block_job_sleep_ns(BlockJob *job, int64_t ns)
{
assert(job->busy);
/* Check cancellation *before* setting busy = false, too! */
if (block_job_is_cancelled(job)) {
- return;
+ return -ECANCELED;
}
if (!block_job_should_pause(job)) {
block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
}
- block_job_pause_point(job);
+ return block_job_pause_point(job);
}
void block_job_yield(BlockJob *job)
@@ -906,13 +908,17 @@ void block_job_yield(BlockJob *job)
block_job_pause_point(job);
}
-void block_job_relax(BlockJob *job, int64_t delay_ns)
+int block_job_relax(BlockJob *job, int64_t delay_ns)
{
+ if (block_job_is_cancelled(job)) {
+ return -ECANCELED;
+ }
+
if (delay_ns || (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - \
job->last_enter_ns > SLICE_TIME)) {
- block_job_sleep_ns(job, delay_ns);
+ return block_job_sleep_ns(job, delay_ns);
} else {
- block_job_pause_point(job);
+ return block_job_pause_point(job);
}
}
@@ -147,7 +147,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
* interrupt the wait.
*/
-void block_job_sleep_ns(BlockJob *job, int64_t ns);
+int block_job_sleep_ns(BlockJob *job, int64_t ns);
/**
* block_job_yield:
@@ -167,8 +167,10 @@ void block_job_yield(BlockJob *job);
* If delay_ns is 0, yield if it has been SLICE_TIME
* nanoseconds since the last yield. Otherwise, check
* if we need to yield for a pause event.
+ *
+ * returns -ECANCELED if the job has been canceled.
*/
-void block_job_relax(BlockJob *job, int64_t delay_ns);
+int block_job_relax(BlockJob *job, int64_t delay_ns);
/**
* block_job_pause_all:
@@ -217,7 +219,7 @@ bool block_job_is_cancelled(BlockJob *job);
* Pause now if block_job_pause() has been called. Block jobs that perform
* lots of I/O must call this between requests so that the job can be paused.
*/
-void coroutine_fn block_job_pause_point(BlockJob *job);
+int coroutine_fn block_job_pause_point(BlockJob *job);
/**
* block_job_enter:
This is just an optimization for callers who are likely going to want to check quite close to this call if the job was canceled or not anyway. Along the same lines, add the return to block_job_pause_point and block_job_sleep_ns, so we don't have to re-check it quite so excessively. Signed-off-by: John Snow <jsnow@redhat.com> --- blockjob.c | 28 +++++++++++++++++----------- include/block/blockjob_int.h | 8 +++++--- 2 files changed, 22 insertions(+), 14 deletions(-)