| Message ID | 20190130004811.27372-63-cota@braap.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | per-CPU locks |
Emilio G. Cota <cota@braap.org> writes:

> It will gain some users soon.
>
> Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> Signed-off-by: Emilio G. Cota <cota@braap.org>
> ---
>  include/qom/cpu.h | 36 +++++++++++++++++++++++++++++++++---
>  1 file changed, 33 insertions(+), 3 deletions(-)
>
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 96a5d0cb94..27a80bc113 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -27,6 +27,7 @@
>  #include "qapi/qapi-types-run-state.h"
>  #include "qemu/bitmap.h"
>  #include "qemu/fprintf-fn.h"
> +#include "qemu/main-loop.h"
>  #include "qemu/rcu_queue.h"
>  #include "qemu/queue.h"
>  #include "qemu/thread.h"
> @@ -87,6 +88,8 @@ struct TranslationBlock;
>   * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
>   * @has_work: Callback for checking if there is work to do. Called with the
>   *            CPU lock held.
> + * @has_work_with_iothread_lock: Callback for checking if there is work to do.
> + *                               Called with both the BQL and the CPU lock held.
>   * @do_interrupt: Callback for interrupt handling.
>   * @do_unassigned_access: Callback for unassigned access handling.
>   * (this is deprecated: new targets should use do_transaction_failed instead)
> @@ -158,6 +161,7 @@ typedef struct CPUClass {
>      void (*reset)(CPUState *cpu);
>      int reset_dump_flags;
>      bool (*has_work)(CPUState *cpu);
> +    bool (*has_work_with_iothread_lock)(CPUState *cpu);
>      void (*do_interrupt)(CPUState *cpu);
>      CPUUnassignedAccess do_unassigned_access;
>      void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
> @@ -796,14 +800,40 @@ const char *parse_cpu_model(const char *cpu_model);
>  static inline bool cpu_has_work(CPUState *cpu)
>  {
>      CPUClass *cc = CPU_GET_CLASS(cpu);
> +    bool has_cpu_lock = cpu_mutex_locked(cpu);
> +    bool (*func)(CPUState *cpu);
>      bool ret;
>
> +    if (cc->has_work_with_iothread_lock) {
> +        if (qemu_mutex_iothread_locked()) {
> +            func = cc->has_work_with_iothread_lock;
> +            goto call_func;
> +        }
> +
> +        if (has_cpu_lock) {
> +            /* avoid deadlock by acquiring the locks in order */

This is fine here but can we expand the comment above:

 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do. If the vCPU helper needs to
 * check its work status with the BQL held, ensure we hold the BQL
 * before taking the CPU lock.

Where is our canonical description of the locking interaction between
BQL and CPU locks?

Otherwise:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

> +            cpu_mutex_unlock(cpu);
> +        }
> +        qemu_mutex_lock_iothread();
> +        cpu_mutex_lock(cpu);
> +
> +        ret = cc->has_work_with_iothread_lock(cpu);
> +
> +        qemu_mutex_unlock_iothread();
> +        if (!has_cpu_lock) {
> +            cpu_mutex_unlock(cpu);
> +        }
> +        return ret;
> +    }
> +
>      g_assert(cc->has_work);
> -    if (cpu_mutex_locked(cpu)) {
> -        return cc->has_work(cpu);
> +    func = cc->has_work;
> + call_func:
> +    if (has_cpu_lock) {
> +        return func(cpu);
>      }
>      cpu_mutex_lock(cpu);
> -    ret = cc->has_work(cpu);
> +    ret = func(cpu);
>      cpu_mutex_unlock(cpu);
>      return ret;
>  }

--
Alex Bennée
On Fri, Feb 08, 2019 at 11:33:32 +0000, Alex Bennée wrote:
>
> Emilio G. Cota <cota@braap.org> writes:
>
> > It will gain some users soon.
> >
> > Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
> > Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> > Signed-off-by: Emilio G. Cota <cota@braap.org>
> > ---
> >  include/qom/cpu.h | 36 +++++++++++++++++++++++++++++++++---
(snip)
> >  static inline bool cpu_has_work(CPUState *cpu)
> >  {
> >      CPUClass *cc = CPU_GET_CLASS(cpu);
> > +    bool has_cpu_lock = cpu_mutex_locked(cpu);
> > +    bool (*func)(CPUState *cpu);
> >      bool ret;
> >
> > +    if (cc->has_work_with_iothread_lock) {
> > +        if (qemu_mutex_iothread_locked()) {
> > +            func = cc->has_work_with_iothread_lock;
> > +            goto call_func;
> > +        }
> > +
> > +        if (has_cpu_lock) {
> > +            /* avoid deadlock by acquiring the locks in order */
>
> This is fine here but can we expand the comment above:
>
>  * cpu_has_work:
>  * @cpu: The vCPU to check.
>  *
>  * Checks whether the CPU has work to do. If the vCPU helper needs to
>  * check its work status with the BQL held, ensure we hold the BQL
>  * before taking the CPU lock.

I added a comment to the body of the function:

+    /* some targets require us to hold the BQL when checking for work */
     if (cc->has_work_with_iothread_lock) {

> Where is our canonical description of the locking interaction between
> BQL and CPU locks?

It's in a few places, for instance cpu_mutex_lock's documentation in
include/qom/cpu.h.

I've added a comment about the locking order to @lock's documentation,
also in cpu.h:

- * @lock: Lock to prevent multiple access to per-CPU fields.
+ * @lock: Lock to prevent multiple access to per-CPU fields. Must be acqrd
+ *        after the BQL.

> Otherwise:
>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

Thanks!

Emilio
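The exchange above turns on a single ordering rule: the BQL is always taken before the per-CPU lock, so a thread that already holds the CPU lock must drop it, take the BQL, and then re-take the CPU lock. Below is a minimal sketch of that rule outside of cpu_has_work(); the helper name is made up for illustration, and it assumes the cpu_mutex_*() primitives introduced earlier in this series plus the existing qemu_mutex_*_iothread() BQL helpers.

```c
#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* qemu_mutex_lock_iothread() and friends */
#include "qom/cpu.h"          /* cpu_mutex_lock()/unlock()/locked() */

/*
 * Hypothetical helper: run something that needs both the BQL and the
 * CPU lock, whether or not the caller already holds the CPU lock.
 * To keep the sketch focused on the ordering rule, assume the caller
 * does not already hold the BQL (cpu_has_work() handles that case
 * separately with an early call).
 */
static void run_with_bql_and_cpu_lock(CPUState *cpu, void (*work)(CPUState *))
{
    bool had_cpu_lock = cpu_mutex_locked(cpu);

    g_assert(!qemu_mutex_iothread_locked());

    if (had_cpu_lock) {
        /* drop the CPU lock first to avoid a BQL-vs-CPU-lock inversion */
        cpu_mutex_unlock(cpu);
    }
    qemu_mutex_lock_iothread();   /* BQL first ... */
    cpu_mutex_lock(cpu);          /* ... then the per-CPU lock */

    work(cpu);                    /* runs with both locks held */

    qemu_mutex_unlock_iothread();
    if (!had_cpu_lock) {
        cpu_mutex_unlock(cpu);    /* leave the caller's lock state unchanged */
    }
}
```

This mirrors what the patch's cpu_has_work() does for the has_work_with_iothread_lock case: the caller's CPU-lock state is preserved, and the BQL is only held for the duration of the call.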
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 96a5d0cb94..27a80bc113 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -27,6 +27,7 @@
 #include "qapi/qapi-types-run-state.h"
 #include "qemu/bitmap.h"
 #include "qemu/fprintf-fn.h"
+#include "qemu/main-loop.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/queue.h"
 #include "qemu/thread.h"
@@ -87,6 +88,8 @@ struct TranslationBlock;
  * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
  * @has_work: Callback for checking if there is work to do. Called with the
  *            CPU lock held.
+ * @has_work_with_iothread_lock: Callback for checking if there is work to do.
+ *                               Called with both the BQL and the CPU lock held.
  * @do_interrupt: Callback for interrupt handling.
  * @do_unassigned_access: Callback for unassigned access handling.
  * (this is deprecated: new targets should use do_transaction_failed instead)
@@ -158,6 +161,7 @@ typedef struct CPUClass {
     void (*reset)(CPUState *cpu);
     int reset_dump_flags;
     bool (*has_work)(CPUState *cpu);
+    bool (*has_work_with_iothread_lock)(CPUState *cpu);
     void (*do_interrupt)(CPUState *cpu);
     CPUUnassignedAccess do_unassigned_access;
     void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
@@ -796,14 +800,40 @@ const char *parse_cpu_model(const char *cpu_model);
 static inline bool cpu_has_work(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
+    bool has_cpu_lock = cpu_mutex_locked(cpu);
+    bool (*func)(CPUState *cpu);
     bool ret;
 
+    if (cc->has_work_with_iothread_lock) {
+        if (qemu_mutex_iothread_locked()) {
+            func = cc->has_work_with_iothread_lock;
+            goto call_func;
+        }
+
+        if (has_cpu_lock) {
+            /* avoid deadlock by acquiring the locks in order */
+            cpu_mutex_unlock(cpu);
+        }
+        qemu_mutex_lock_iothread();
+        cpu_mutex_lock(cpu);
+
+        ret = cc->has_work_with_iothread_lock(cpu);
+
+        qemu_mutex_unlock_iothread();
+        if (!has_cpu_lock) {
+            cpu_mutex_unlock(cpu);
+        }
+        return ret;
+    }
+
     g_assert(cc->has_work);
-    if (cpu_mutex_locked(cpu)) {
-        return cc->has_work(cpu);
+    func = cc->has_work;
+ call_func:
+    if (has_cpu_lock) {
+        return func(cpu);
     }
     cpu_mutex_lock(cpu);
-    ret = cc->has_work(cpu);
+    ret = func(cpu);
     cpu_mutex_unlock(cpu);
     return ret;
 }
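Emilio's note that the hook "will gain some users soon" refers to targets whose work check also has to consult BQL-protected state. The sketch below shows what such a user could look like; the target name "foo", the placeholder interrupt test, and the class-init wiring are made up for illustration and are not taken from this series.

```c
#include "qemu/osdep.h"
#include "qom/cpu.h"

/*
 * Hypothetical target hook.  Per the new documentation it is called
 * with both the BQL and the CPU lock held.  The pending-interrupt test
 * here stands in for a target-specific predicate that also needs to
 * look at BQL-protected device state.
 */
static bool foo_cpu_has_work_with_iothread_lock(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* set the BQL-aware hook instead of cc->has_work */
    cc->has_work_with_iothread_lock = foo_cpu_has_work_with_iothread_lock;
}
```

Callers are unaffected: they keep calling cpu_has_work(), and the wrapper acquires the BQL and the CPU lock in the documented order before invoking the hook.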