@@ -635,26 +635,17 @@ static void cpu_release_index(CPUState *cpu)
return;
}
#endif
-void cpu_exec_exit(CPUState *cpu)
+void cpu_exec_unrealize(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
#if defined(CONFIG_USER_ONLY)
cpu_list_lock();
#endif
- if (cpu->cpu_index == -1) {
- /* cpu_index was never allocated by this @cpu or was already freed. */
-#if defined(CONFIG_USER_ONLY)
- cpu_list_unlock();
-#endif
- return;
- }
QTAILQ_REMOVE(&cpus, cpu, node);
- cpu_release_index(cpu);
- cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
(void) cc;
cpu_list_unlock();
#else
@@ -666,13 +657,54 @@ void cpu_exec_exit(CPUState *cpu)
}
#endif
}
-void cpu_exec_init(CPUState *cpu, Error **errp)
+void cpu_exec_exit_cpu_index(CPUState *cpu)
+{
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
+ if (cpu->cpu_index == -1) {
+ /* cpu_index was never allocated by this @cpu or was already freed. */
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
+ return;
+ }
+
+ cpu_release_index(cpu);
+ cpu->cpu_index = -1;
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
+}
+
+void cpu_exec_exit(CPUState *cpu)
+{
+ cpu_exec_unrealize(cpu);
+ cpu_exec_exit_cpu_index(cpu);
+}
+
+void cpu_exec_init_cpu_index(CPUState *cpu, Error **errp)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
Error *local_err = NULL;
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
+ cpu->cpu_index = cpu_get_free_index(&local_err);
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
+ if (local_err) {
+ error_propagate(errp, local_err);
+ }
+}
+
+void cpu_exec_realize(CPUState *cpu, Error **errp)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
cpu->as = NULL;
cpu->num_ases = 0;
#ifndef CONFIG_USER_ONLY
@@ -695,16 +727,8 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
#if defined(CONFIG_USER_ONLY)
cpu_list_lock();
#endif
- cpu->cpu_index = cpu_get_free_index(&local_err);
- if (local_err) {
- error_propagate(errp, local_err);
-#if defined(CONFIG_USER_ONLY)
- cpu_list_unlock();
-#endif
- return;
- }
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
(void) cc;
cpu_list_unlock();
@@ -717,8 +741,21 @@ void cpu_exec_init(CPUState *cpu, Error **errp)
}
#endif
}
+void cpu_exec_init(CPUState *cpu, Error **errp)
+{
+ Error *local_err = NULL;
+
+ cpu_exec_init_cpu_index(cpu, &local_err);
+ if (local_err == NULL) {
+ cpu_exec_realize(cpu, &local_err);
+ }
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ }
+}
+
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
tb_invalidate_phys_page_range(pc, pc + 1, 0);
@@ -56,8 +56,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
uint32_t flags,
int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
+void cpu_exec_init_cpu_index(CPUState *cpu, Error **errp);
+void cpu_exec_realize(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
#if !defined(CONFIG_USER_ONLY)
@@ -854,8 +854,10 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);
+void cpu_exec_exit_cpu_index(CPUState *cpu);
+void cpu_exec_unrealize(CPUState *cpu);
#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
This patch splits the cpu_index bits from the rest of the cpu_exec init and
exit code, so that they may be called separately. The goal is to be able to
initialize cpu_index during cpu initialization and keep the rest for cpu
realization.

The cpu_exec_init() and cpu_exec_exit() functions are kept since most callers
will stick to the current behaviour.

Signed-off-by: Greg Kurz <groug@kaod.org>
---
 exec.c                  | 77 +++++++++++++++++++++++++++++++++++------------
 include/exec/exec-all.h |  2 +
 include/qom/cpu.h       |  2 +
 3 files changed, 61 insertions(+), 20 deletions(-)
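
For reviewers, a minimal sketch (not part of this patch) of how a target CPU
type could consume the split helpers: allocate cpu_index from instance_init so
it is available as soon as the object exists, and keep the rest of the
cpu_exec setup/teardown on the realize/unrealize path. The foo_cpu_* names
below are hypothetical placeholders; only the cpu_exec_* calls come from this
series.

#include "qemu/osdep.h"
#include "qom/cpu.h"
#include "exec/exec-all.h"
#include "qapi/error.h"

/* Hypothetical target code: only illustrates the intended calling
 * convention for the split cpu_exec helpers introduced above. */

static void foo_cpu_instance_init(Object *obj)
{
    CPUState *cs = CPU(obj);

    /* Allocate cpu_index at init time; abort on failure since an
     * instance_init hook has no Error ** to propagate through. */
    cpu_exec_init_cpu_index(cs, &error_abort);
}

static void foo_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    Error *local_err = NULL;

    /* The rest of the former cpu_exec_init(): address space setup,
     * insertion into the cpus list, vmstate registration. */
    cpu_exec_realize(cs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static void foo_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);

    /* Mirror of realize: unhook from the cpus list and vmstate first,
     * then release the cpu_index. */
    cpu_exec_unrealize(cs);
    cpu_exec_exit_cpu_index(cs);
}

This matches the stated goal of the split: cpu_index becomes available as soon
as the CPU object is initialized, while everything cpu_exec_init() used to do
at realize time stays on the realize path, and cpu_exec_init()/cpu_exec_exit()
remain for callers that keep the current behaviour.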