@@ -23,7 +23,6 @@
#include "exec/memory-internal.h"
bool exit_request;
-CPUState *tcg_current_cpu;
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
@@ -291,7 +291,6 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
*/
tb = tb_find_physical(cpu, pc, cs_base, flags);
if (!tb) {
-#ifdef CONFIG_USER_ONLY
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. tb_lock is released later in
* cpu_exec.
@@ -303,7 +302,6 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
* duplicated TB in the pool.
*/
tb = tb_find_physical(cpu, pc, cs_base, flags);
-#endif
if (!tb) {
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
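
The comment in this hunk encodes a lock-ordering rule, not just a locality detail: mmap_lock must always be taken outside tb_lock, and tb_lock is handed over to cpu_exec for release. A minimal sketch of the nesting the code relies on (illustrative only; in the patch the unlock side is split across functions):

    /* correct nesting: mmap_lock outside, tb_lock inside */
    mmap_lock();
    tb_lock();
    /* ... tb_find_physical() / tb_gen_code() run under both locks ... */
    tb_unlock();
    mmap_unlock();

Taking the two locks in the opposite order on another code path would be a classic AB-BA deadlock.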
@@ -408,13 +406,8 @@ int cpu_exec(CPUState *cpu)
cpu->halted = 0;
}
- atomic_mb_set(&tcg_current_cpu, cpu);
rcu_read_lock();
- if (unlikely(atomic_mb_read(&exit_request))) {
- cpu->exit_request = 1;
- }
-
cc->cpu_exec_enter(cpu);
/* Calculate difference between guest clock and host clock.
@@ -533,7 +526,6 @@ int cpu_exec(CPUState *cpu)
}
if (unlikely(cpu->exit_request
|| replay_has_interrupt())) {
- cpu->exit_request = 0;
cpu->exception_index = EXCP_INTERRUPT;
cpu_loop_exit(cpu);
}
@@ -657,10 +649,5 @@ int cpu_exec(CPUState *cpu)
cc->cpu_exec_exit(cpu);
rcu_read_unlock();
- /* fail safe : never use current_cpu outside cpu_exec() */
- current_cpu = NULL;
-
- /* Does not need atomic_mb_set because a spurious wakeup is okay. */
- atomic_set(&tcg_current_cpu, NULL);
return ret;
}
@@ -957,10 +957,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
qemu_cpu_kick(cpu);
while (!atomic_mb_read(&wi.done)) {
- CPUState *self_cpu = current_cpu;
-
qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
- current_cpu = self_cpu;
}
}
@@ -1022,27 +1019,29 @@ static void flush_queued_work(CPUState *cpu)
static void qemu_wait_io_event_common(CPUState *cpu)
{
+ atomic_mb_set(&cpu->thread_kicked, false);
if (cpu->stop) {
cpu->stop = false;
cpu->stopped = true;
qemu_cond_broadcast(&qemu_pause_cond);
}
flush_queued_work(cpu);
- cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
- while (all_cpu_threads_idle()) {
- /* Start accounting real time to the virtual clock if the CPUs
- are idle. */
- qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+ while (cpu_thread_is_idle(cpu)) {
+        /* Start accounting real time to the virtual clock once all
+         * vCPUs are idle. Only one thread needs to arm the warp
+         * timer, so let vCPU 0 do it.
+         */
+        if (all_cpu_threads_idle() && cpu->cpu_index == 0) {
+            qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+        }
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
- CPU_FOREACH(cpu) {
- qemu_wait_io_event_common(cpu);
- }
+ qemu_wait_io_event_common(cpu);
}
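
qemu_tcg_wait_io_event() now takes the waiting vCPU instead of iterating over all of them, and the warp is armed only once, by vCPU 0, when every vCPU is idle. The warp matters with -icount: rather than letting an idle guest's virtual clock crawl along in real time, it is jumped forward to the next timer deadline. A conceptual sketch of the idea (not the body of qemu_clock_warp()):

    /* all vCPUs idle: find the nearest QEMU_CLOCK_VIRTUAL deadline ... */
    int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline > 0) {
        /* ... and account that much real time to the virtual clock so
         * the pending timer fires without the guest spinning idly */
    }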
static void qemu_kvm_wait_io_event(CPUState *cpu)
@@ -1109,6 +1108,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->can_do_io = 1;
+ current_cpu = cpu;
sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
@@ -1117,9 +1117,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
cpu->created = true;
qemu_cond_signal(&qemu_cpu_cond);
- current_cpu = cpu;
while (1) {
- current_cpu = NULL;
qemu_mutex_unlock_iothread();
do {
int sig;
@@ -1130,7 +1128,6 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
exit(1);
}
qemu_mutex_lock_iothread();
- current_cpu = cpu;
qemu_wait_io_event_common(cpu);
}
@@ -1147,32 +1144,40 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
* elsewhere.
*/
static int tcg_cpu_exec(CPUState *cpu);
-static void qemu_cpu_kick_no_halt(void);
+
+struct kick_info {
+ QEMUTimer *timer;
+ CPUState *cpu;
+};
static void kick_tcg_thread(void *opaque)
{
- QEMUTimer *self = *(QEMUTimer **) opaque;
- timer_mod(self,
+ struct kick_info *info = (struct kick_info *) opaque;
+ CPUState *cpu = atomic_mb_read(&info->cpu);
+
+ timer_mod(info->timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 10);
- qemu_cpu_kick_no_halt();
+
+ if (cpu) {
+ cpu_exit(cpu);
+ }
}
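
Design note: the old callback only needed the timer itself, so it took a QEMUTimer ** as its opaque pointer; it now also has to know which vCPU to kick, so timer and CPU are bundled into kick_info. The TCG thread publishes the CPU it is about to run with atomic_mb_set() (see the scheduling loop further down) and the callback consumes it with the matching atomic_mb_read(). Reduced to that handshake alone (flow condensed from the patch):

    /* producer: single TCG thread, before executing a vCPU */
    atomic_mb_set(&info.cpu, cpu);

    /* consumer: timer callback */
    CPUState *cur = atomic_mb_read(&info->cpu);
    if (cur) {
        cpu_exit(cur);   /* kick the vCPU out of its execution loop */
    }

The callback re-arms itself before kicking, so the tick keeps firing every tenth of a second of virtual time regardless of what the kick does.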
-static void *qemu_tcg_cpu_thread_fn(void *arg)
+static void *qemu_tcg_single_cpu_thread_fn(void *arg)
{
+ struct kick_info info;
CPUState *cpu = arg;
- QEMUTimer *kick_timer;
rcu_register_thread();
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
- CPU_FOREACH(cpu) {
- cpu->thread_id = qemu_get_thread_id();
- cpu->created = true;
- cpu->can_do_io = 1;
- }
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->can_do_io = 1;
+ current_cpu = cpu;
qemu_cond_signal(&qemu_cpu_cond);
/* wait for initial kick-off after machine start */
@@ -1187,14 +1192,18 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
/* Set to kick if we have to do more than one vCPU */
if (CPU_NEXT(first_cpu)) {
- kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kick_tcg_thread, &kick_timer);
- timer_mod(kick_timer,
+ info.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kick_tcg_thread, &info);
+ info.cpu = NULL;
+ smp_wmb();
+ timer_mod(info.timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
get_ticks_per_sec() / 10);
}
/* process any pending work */
- atomic_mb_set(&exit_request, 1);
+ CPU_FOREACH(cpu) {
+ atomic_mb_set(&cpu->exit_request, 1);
+ }
cpu = first_cpu;
@@ -1207,7 +1216,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
cpu = first_cpu;
}
- for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+ for (; cpu != NULL && !cpu->exit_request; cpu = CPU_NEXT(cpu)) {
+ atomic_mb_set(&info.cpu, cpu);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
(cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1224,17 +1234,64 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
} /* for cpu.. */
+ atomic_mb_set(&info.cpu, NULL);
+
/* Pairs with smp_wmb in qemu_cpu_kick. */
- atomic_mb_set(&exit_request, 0);
+ CPU_FOREACH(cpu) {
+ atomic_mb_set(&cpu->exit_request, 0);
+ }
if (use_icount) {
- int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+ int64_t deadline =
+ qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ if (deadline == 0) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ }
+ }
+
+ qemu_tcg_wait_io_event(first_cpu);
+ }
+
+ return NULL;
+}
+
+/* Multi-threaded TCG
+ *
+ * In the multi-threaded case each vCPU has its own thread. The TLS
+ * variable current_cpu can be used deep in the code to find the
+ * current CPUState for a given thread.
+ */
+
+static void *qemu_tcg_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->can_do_io = 1;
+ current_cpu = cpu;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ while (1) {
+ if (!cpu->stopped) {
+ tcg_cpu_exec(cpu);
- if (deadline == 0) {
- qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ if (use_icount) {
+ int64_t deadline =
+ qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+ if (deadline == 0) {
+ qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+ }
}
}
- qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
+ qemu_tcg_wait_io_event(cpu);
}
return NULL;
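
With one thread per vCPU there is nothing left to round-robin: each thread runs only its own CPUState, and the TLS variable current_cpu, set once at thread start, identifies it anywhere down the call stack, exactly as the comment block above says. A usage illustration (hypothetical helper, not part of the patch):

    /* deep in target code, with no CPUState threaded through the args */
    static void log_current_vcpu(void)
    {
        CPUState *cpu = current_cpu;   /* per-thread, never clobbered */

        if (cpu) {
            printf("running on vCPU %d\n", cpu->cpu_index);
        }
    }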
@@ -1259,24 +1316,11 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
#endif
}
-static void qemu_cpu_kick_no_halt(void)
-{
- CPUState *cpu;
- /* Ensure whatever caused the exit has reached the CPU threads before
- * writing exit_request.
- */
- atomic_mb_set(&exit_request, 1);
- cpu = atomic_mb_read(&tcg_current_cpu);
- if (cpu) {
- cpu_exit(cpu);
- }
-}
-
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
if (tcg_enabled()) {
- qemu_cpu_kick_no_halt();
+ cpu_exit(cpu);
} else {
qemu_cpu_kick_thread(cpu);
}
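
qemu_cpu_kick_no_halt() existed only because the exit flag was global: the kicker raised exit_request for everyone and then had to discover, via tcg_current_cpu, which vCPU the single TCG thread happened to be executing. With a per-CPU flag the kick is simply cpu_exit() on the target. The ordering contract from the deleted comment survives, now per vCPU: publish whatever caused the exit first, then raise the flag with a barrier, so a vCPU that sees the flag also sees the cause. Roughly (condensed; the flag store with a barrier is what the TCG thread functions above do via atomic_mb_set()):

    /* kicker */
    cpu->interrupt_request |= mask;          /* example cause */
    atomic_mb_set(&cpu->exit_request, 1);    /* flag, with full barrier */

    /* vCPU execution loop (the patch checks this with a plain read) */
    if (unlikely(cpu->exit_request)) {
        /* leave the TB loop and service interrupt_request */
    }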
@@ -1342,13 +1386,6 @@ void pause_all_vcpus(void)
if (qemu_in_vcpu_thread()) {
cpu_stop_current();
- if (!kvm_enabled()) {
- CPU_FOREACH(cpu) {
- cpu->stop = false;
- cpu->stopped = true;
- }
- return;
- }
}
while (!all_vcpus_paused()) {
@@ -1382,29 +1419,41 @@ void resume_all_vcpus(void)
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
- static QemuCond *tcg_halt_cond;
- static QemuThread *tcg_cpu_thread;
+ static QemuCond *single_tcg_halt_cond;
+ static QemuThread *single_tcg_cpu_thread;
- /* share a single thread for all cpus with TCG */
- if (!tcg_cpu_thread) {
+ if (qemu_tcg_mttcg_enabled() || !single_tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
- tcg_halt_cond = cpu->halt_cond;
- snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
+
+ if (qemu_tcg_mttcg_enabled()) {
+ /* create a thread per vCPU with TCG (MTTCG) */
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
cpu->cpu_index);
- qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
- cpu, QEMU_THREAD_JOINABLE);
+
+ qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+
+ } else {
+ /* share a single thread for all cpus with TCG */
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
+            qemu_thread_create(cpu->thread, thread_name,
+                               qemu_tcg_single_cpu_thread_fn,
+                               cpu, QEMU_THREAD_JOINABLE);
+
+ single_tcg_halt_cond = cpu->halt_cond;
+ single_tcg_cpu_thread = cpu->thread;
+ }
#ifdef _WIN32
cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
while (!cpu->created) {
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
}
- tcg_cpu_thread = cpu->thread;
} else {
- cpu->thread = tcg_cpu_thread;
- cpu->halt_cond = tcg_halt_cond;
+ /* For non-MTTCG cases we share the thread */
+ cpu->thread = single_tcg_cpu_thread;
+ cpu->halt_cond = single_tcg_halt_cond;
}
}
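
For illustration, with a hypothetical 4-vCPU guest this hunk produces the following thread layout:

    MTTCG:           four joinable threads, "CPU 0/TCG" .. "CPU 3/TCG",
                     each running qemu_tcg_cpu_thread_fn for its own vCPU
    single-threaded: one "ALL CPUs/TCG" thread running
                     qemu_tcg_single_cpu_thread_fn, created along with the
                     first vCPU; later vCPUs reuse single_tcg_cpu_thread
                     and single_tcg_halt_cond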
@@ -455,8 +455,4 @@ bool memory_region_is_unassigned(MemoryRegion *mr);
/* vl.c */
extern int singlestep;
-/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
-extern CPUState *tcg_current_cpu;
-extern bool exit_request;
-
#endif
@@ -121,47 +121,37 @@ static void *l1_map[V_L1_SIZE];
TCGContext tcg_ctx;
/* translation block context */
-#ifdef CONFIG_USER_ONLY
__thread int have_tb_lock;
-#endif
void tb_lock(void)
{
-#ifdef CONFIG_USER_ONLY
assert(!have_tb_lock);
qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
have_tb_lock++;
-#endif
}
void tb_unlock(void)
{
-#ifdef CONFIG_USER_ONLY
assert(have_tb_lock);
have_tb_lock--;
qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
-#endif
}
bool tb_lock_recursive(void)
{
-#ifdef CONFIG_USER_ONLY
if (have_tb_lock) {
return false;
}
tb_lock();
-#endif
return true;
}
void tb_lock_reset(void)
{
-#ifdef CONFIG_USER_ONLY
if (have_tb_lock) {
qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
have_tb_lock = 0;
}
-#endif
}
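
With the CONFIG_USER_ONLY guards gone these helpers are compiled for system emulation too, so tb_lock becomes a real lock everywhere. Note the convention: tb_lock_recursive() returns true only when it actually acquired the lock, which lets a caller that may or may not already hold tb_lock balance correctly (illustrative caller):

    bool acquired = tb_lock_recursive();   /* false: we already held it */

    /* ... work that must run under tb_lock ... */

    if (acquired) {
        tb_unlock();
    }

tb_lock_reset() is the error-path escape hatch: it drops the lock unconditionally if, and only if, this thread holds it.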
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,