diff mbox series

[v3,01/16] job.c: make job_mutex and job_lock/unlock() public

Message ID 20220105140208.365608-2-eesposit@redhat.com (mailing list archive)
State New, archived
Headers show
Series job: replace AioContext lock with job_mutex | expand

Commit Message

Emanuele Giuseppe Esposito Jan. 5, 2022, 2:01 p.m. UTC
job mutex will be used to protect the job struct elements and list,
replacing AioContext locks.

Right now use a shared lock for all jobs, in order to keep things
simple. Once the AioContext lock is gone, we can introduce per-job
locks.

To simplify the switch from the AioContext lock to the job lock, introduce
*nop* lock/unlock functions and macros. Once everything is protected
by the job lock, we can add the mutex and remove the AioContext lock.

Since job_mutex is already being used, add static
real_job_{lock/unlock}.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 include/qemu/job.h | 24 ++++++++++++++++++++++++
 job.c              | 35 +++++++++++++++++++++++------------
 2 files changed, 47 insertions(+), 12 deletions(-)

Comments

Paolo Bonzini Jan. 19, 2022, 9:56 a.m. UTC | #1
On 1/5/22 15:01, Emanuele Giuseppe Esposito wrote:
> job mutex will be used to protect the job struct elements and list,
> replacing AioContext locks.
> 
> Right now use a shared lock for all jobs, in order to keep things
> simple. Once the AioContext lock is gone, we can introduce per-job
> locks.

Not even needed in my opinion, this is not a fast path.  But we'll see.

> To simplify the switch from aiocontext to job lock, introduce
> *nop* lock/unlock functions and macros. Once everything is protected
> by jobs, we can add the mutex and remove the aiocontext.
> 
> Since job_mutex is already being used, add static
> real_job_{lock/unlock}.

Out of curiosity, what breaks if the real job lock is used from the 
start?  (It probably should be mentioned in the commit message).


> -static void job_lock(void)
> +static void real_job_lock(void)
>   {
>       qemu_mutex_lock(&job_mutex);
>   }
>   
> -static void job_unlock(void)
> +static void real_job_unlock(void)
>   {
>       qemu_mutex_unlock(&job_mutex);
>   }

Would it work to

#define job_lock real_job_lock
#define job_unlock real_job_unlock

instead of having to do the changes below?

Paolo

> @@ -449,21 +460,21 @@ void job_enter_cond(Job *job, bool(*fn)(Job *job))
>           return;
>       }
>   
> -    job_lock();
> +    real_job_lock();
>       if (job->busy) {
> -        job_unlock();
> +        real_job_unlock();
>           return;
>       }
>   
>       if (fn && !fn(job)) {
> -        job_unlock();
> +        real_job_unlock();
>           return;
>       }
>   
>       assert(!job->deferred_to_main_loop);
>       timer_del(&job->sleep_timer);
>       job->busy = true;
> -    job_unlock();
> +    real_job_unlock();
>       aio_co_enter(job->aio_context, job->co);
>   }
>   
> @@ -480,13 +491,13 @@ void job_enter(Job *job)
>    * called explicitly. */
>   static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
>   {
> -    job_lock();
> +    real_job_lock();
>       if (ns != -1) {
>           timer_mod(&job->sleep_timer, ns);
>       }
>       job->busy = false;
>       job_event_idle(job);
> -    job_unlock();
> +    real_job_unlock();
>       qemu_coroutine_yield();
>   
>       /* Set by job_enter_cond() before re-entering the coroutine.  */
Paolo Bonzini Jan. 19, 2022, 11:13 a.m. UTC | #2
On 1/19/22 10:56, Paolo Bonzini wrote:
> On 1/5/22 15:01, Emanuele Giuseppe Esposito wrote:
>> job mutex will be used to protect the job struct elements and list,
>> replacing AioContext locks.
>>
>> Right now use a shared lock for all jobs, in order to keep things
>> simple. Once the AioContext lock is gone, we can introduce per-job
>> locks.
> 
> Not even needed in my opinion, this is not a fast path.  But we'll see.
> 
>> To simplify the switch from aiocontext to job lock, introduce
>> *nop* lock/unlock functions and macros. Once everything is protected
>> by jobs, we can add the mutex and remove the aiocontext.
>>
>> Since job_mutex is already being used, add static
>> real_job_{lock/unlock}.
> 
> Out of curiosity, what breaks if the real job lock is used from the 
> start?  (It probably should be mentioned in the commit message).
> 
> 
>> -static void job_lock(void)
>> +static void real_job_lock(void)
>>   {
>>       qemu_mutex_lock(&job_mutex);
>>   }
>> -static void job_unlock(void)
>> +static void real_job_unlock(void)
>>   {
>>       qemu_mutex_unlock(&job_mutex);
>>   }
> 
> Would it work to
> 
> #define job_lock real_job_lock
> #define job_unlock real_job_unlock
> 
> instead of having to do the changes below?

Ignore all this, please.

Paolo
diff mbox series

Patch

diff --git a/include/qemu/job.h b/include/qemu/job.h
index 915ceff425..8d0d370dda 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -312,6 +312,30 @@  typedef enum JobCreateFlags {
     JOB_MANUAL_DISMISS = 0x04,
 } JobCreateFlags;
 
+extern QemuMutex job_mutex;
+
+#define JOB_LOCK_GUARD() /* QEMU_LOCK_GUARD(&job_mutex) */
+
+#define WITH_JOB_LOCK_GUARD() /* WITH_QEMU_LOCK_GUARD(&job_mutex) */
+
+/**
+ * job_lock:
+ *
+ * Take the mutex protecting the list of jobs and their status.
+ * Most functions called by the monitor need to call job_lock
+ * and job_unlock manually.  On the other hand, function called
+ * by the block jobs themselves and by the block layer will take the
+ * lock for you.
+ */
+void job_lock(void);
+
+/**
+ * job_unlock:
+ *
+ * Release the mutex protecting the list of jobs and their status.
+ */
+void job_unlock(void);
+
 /**
  * Allocate and return a new job transaction. Jobs can be added to the
  * transaction using job_txn_add_job().
diff --git a/job.c b/job.c
index e048037099..ccf737a179 100644
--- a/job.c
+++ b/job.c
@@ -32,6 +32,12 @@ 
 #include "trace/trace-root.h"
 #include "qapi/qapi-events-job.h"
 
+/*
+ * job_mutex protects the jobs list, but also makes the
+ * struct job fields thread-safe.
+ */
+QemuMutex job_mutex;
+
 static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
 
 /* Job State Transition Table */
@@ -74,17 +80,22 @@  struct JobTxn {
     int refcnt;
 };
 
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
- * and job->sleep_timer, such as concurrent calls to job_do_yield and
- * job_enter. */
-static QemuMutex job_mutex;
+void job_lock(void)
+{
+    /* nop */
+}
+
+void job_unlock(void)
+{
+    /* nop */
+}
 
-static void job_lock(void)
+static void real_job_lock(void)
 {
     qemu_mutex_lock(&job_mutex);
 }
 
-static void job_unlock(void)
+static void real_job_unlock(void)
 {
     qemu_mutex_unlock(&job_mutex);
 }
@@ -449,21 +460,21 @@  void job_enter_cond(Job *job, bool(*fn)(Job *job))
         return;
     }
 
-    job_lock();
+    real_job_lock();
     if (job->busy) {
-        job_unlock();
+        real_job_unlock();
         return;
     }
 
     if (fn && !fn(job)) {
-        job_unlock();
+        real_job_unlock();
         return;
     }
 
     assert(!job->deferred_to_main_loop);
     timer_del(&job->sleep_timer);
     job->busy = true;
-    job_unlock();
+    real_job_unlock();
     aio_co_enter(job->aio_context, job->co);
 }
 
@@ -480,13 +491,13 @@  void job_enter(Job *job)
  * called explicitly. */
 static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
 {
-    job_lock();
+    real_job_lock();
     if (ns != -1) {
         timer_mod(&job->sleep_timer, ns);
     }
     job->busy = false;
     job_event_idle(job);
-    job_unlock();
+    real_job_unlock();
     qemu_coroutine_yield();
 
     /* Set by job_enter_cond() before re-entering the coroutine.  */