From patchwork Tue Feb 1 21:16:04 2011
X-Patchwork-Submitter: Jan Kiszka
X-Patchwork-Id: 524011
From: Jan Kiszka <jan.kiszka@web.de>
To: Avi Kivity, Marcelo Tosatti
Cc: kvm@vger.kernel.org, qemu-devel@nongnu.org
Subject: [PATCH v2 24/24] Fix a few coding style violations in cpus.c
Date: Tue, 1 Feb 2011 22:16:04 +0100
Message-Id: <5e37ebb378a0fd46ea65681e37825ed1ed39c036.1296594961.git.jan.kiszka@web.de>
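Every hunk below applies the same QEMU coding style rule: the body of an if, while, or for statement gets braces even when it is a single statement. As an illustration only (not part of the patch), here is the shape of the transformation on a small self-contained function modelled on cpu_can_run; the function names and plain int parameters are stand-ins for the CPUState fields used in cpus.c:

    /* Before: brace-less single-statement bodies (the style violation). */
    static int can_run_before(int stop, int stopped, int vm_running)
    {
        if (stop)
            return 0;
        if (stopped || !vm_running)
            return 0;
        return 1;
    }

    /* After: braces around every conditional body, as the patch does. */
    static int can_run_after(int stop, int stopped, int vm_running)
    {
        if (stop) {
            return 0;
        }
        if (stopped || !vm_running) {
            return 0;
        }
        return 1;
    }

A few hunks go slightly further in the same spirit: cpu_has_work folds adjacent single-statement ifs into one braced condition, and the empty qemu_cpu_kick stub drops a pointless bare return.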
diff --git a/cpus.c b/cpus.c
index 0d11a20..dd24fe8 100644
--- a/cpus.c
+++ b/cpus.c
@@ -138,25 +138,26 @@ static void do_vm_stop(int reason)
 
 static int cpu_can_run(CPUState *env)
 {
-    if (env->stop)
+    if (env->stop) {
         return 0;
-    if (env->stopped || !vm_running)
+    }
+    if (env->stopped || !vm_running) {
         return 0;
+    }
     return 1;
 }
 
 static int cpu_has_work(CPUState *env)
 {
-    if (env->stop)
+    if (env->stop || env->queued_work_first) {
         return 1;
-    if (env->queued_work_first)
-        return 1;
-    if (env->stopped || !vm_running)
+    }
+    if (env->stopped || !vm_running) {
         return 0;
-    if (!env->halted)
-        return 1;
-    if (qemu_cpu_has_work(env))
+    }
+    if (!env->halted || qemu_cpu_has_work(env)) {
         return 1;
+    }
     return 0;
 }
 
@@ -164,9 +165,11 @@ static int any_cpu_has_work(void)
 {
     CPUState *env;
 
-    for (env = first_cpu; env != NULL; env = env->next_cpu)
-        if (cpu_has_work(env))
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
+        if (cpu_has_work(env)) {
             return 1;
+        }
+    }
     return 0;
 }
 
@@ -232,9 +235,9 @@ static void qemu_event_increment(void)
     static const uint64_t val = 1;
     ssize_t ret;
 
-    if (io_thread_fd == -1)
+    if (io_thread_fd == -1) {
         return;
-
+    }
     do {
         ret = write(io_thread_fd, &val, sizeof(val));
     } while (ret < 0 && errno == EINTR);
@@ -265,17 +268,17 @@ static int qemu_event_init(void)
     int fds[2];
 
     err = qemu_eventfd(fds);
-    if (err == -1)
+    if (err == -1) {
         return -errno;
-
+    }
     err = fcntl_setfl(fds[0], O_NONBLOCK);
-    if (err < 0)
+    if (err < 0) {
         goto fail;
-
+    }
     err = fcntl_setfl(fds[1], O_NONBLOCK);
-    if (err < 0)
+    if (err < 0) {
         goto fail;
-
+    }
     qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                          (void *)(unsigned long)fds[0]);
 
@@ -534,7 +537,6 @@ void pause_all_vcpus(void)
 
 void qemu_cpu_kick(void *env)
 {
-    return;
 }
 
 void qemu_cpu_kick_self(void)
@@ -663,13 +665,15 @@ int qemu_init_main_loop(void)
     blocked_signals = block_io_signals();
 
     ret = qemu_signalfd_init(blocked_signals);
-    if (ret)
+    if (ret) {
         return ret;
+    }
 
     /* Note eventfd must be drained before signalfd handlers run */
     ret = qemu_event_init();
-    if (ret)
+    if (ret) {
         return ret;
+    }
 
     qemu_cond_init(&qemu_pause_cond);
     qemu_cond_init(&qemu_system_cond);
@@ -699,10 +703,11 @@ void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
 
     wi.func = func;
     wi.data = data;
-    if (!env->queued_work_first)
+    if (!env->queued_work_first) {
         env->queued_work_first = &wi;
-    else
+    } else {
         env->queued_work_last->next = &wi;
+    }
     env->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
@@ -720,8 +725,9 @@ static void flush_queued_work(CPUState *env)
 {
     struct qemu_work_item *wi;
 
-    if (!env->queued_work_first)
+    if (!env->queued_work_first) {
         return;
+    }
 
     while ((wi = env->queued_work_first)) {
         env->queued_work_first = wi->next;
@@ -747,8 +753,9 @@ static void qemu_tcg_wait_io_event(void)
 {
     CPUState *env;
 
-    while (!any_cpu_has_work())
+    while (!any_cpu_has_work()) {
         qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);
+    }
 
     qemu_mutex_unlock(&qemu_global_mutex);
 
@@ -769,9 +776,9 @@ static void qemu_tcg_wait_io_event(void)
 
 static void qemu_kvm_wait_io_event(CPUState *env)
 {
-    while (!cpu_has_work(env))
+    while (!cpu_has_work(env)) {
         qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
-
+    }
     qemu_kvm_eat_signals(env);
     qemu_wait_io_event_common(env);
 }
@@ -799,12 +806,14 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* and wait for machine initialization */
-    while (!qemu_system_ready)
+    while (!qemu_system_ready) {
         qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+    }
 
     while (1) {
-        if (cpu_can_run(env))
+        if (cpu_can_run(env)) {
             qemu_cpu_exec(env);
+        }
         qemu_kvm_wait_io_event(env);
     }
 
@@ -820,13 +829,15 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     /* signal CPU creation */
     qemu_mutex_lock(&qemu_global_mutex);
-    for (env = first_cpu; env != NULL; env = env->next_cpu)
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
         env->created = 1;
+    }
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* and wait for machine initialization */
-    while (!qemu_system_ready)
+    while (!qemu_system_ready) {
         qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
+    }
 
     while (1) {
         cpu_exec_all();
@@ -839,6 +850,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 void qemu_cpu_kick(void *_env)
 {
     CPUState *env = _env;
+
     qemu_cond_broadcast(env->halt_cond);
     if (!env->thread_kicked) {
         qemu_thread_signal(env->thread, SIG_IPI);
@@ -890,8 +902,9 @@ static int all_vcpus_paused(void)
     CPUState *penv = first_cpu;
 
     while (penv) {
-        if (!penv->stopped)
+        if (!penv->stopped) {
             return 0;
+        }
         penv = (CPUState *)penv->next_cpu;
     }
 
@@ -933,14 +946,16 @@ void resume_all_vcpus(void)
 static void qemu_tcg_init_vcpu(void *_env)
 {
     CPUState *env = _env;
+
     /* share a single thread for all cpus with TCG */
     if (!tcg_cpu_thread) {
         env->thread = qemu_mallocz(sizeof(QemuThread));
         env->halt_cond = qemu_mallocz(sizeof(QemuCond));
         qemu_cond_init(env->halt_cond);
         qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
-        while (env->created == 0)
+        while (env->created == 0) {
             qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+        }
         tcg_cpu_thread = env->thread;
         tcg_halt_cond = env->halt_cond;
     } else {
@@ -955,8 +970,9 @@ static void qemu_kvm_start_vcpu(CPUState *env)
     env->halt_cond = qemu_mallocz(sizeof(QemuCond));
     qemu_cond_init(env->halt_cond);
     qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
-    while (env->created == 0)
+    while (env->created == 0) {
         qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
+    }
 }
 
 void qemu_init_vcpu(void *_env)
@@ -965,10 +981,11 @@ void qemu_init_vcpu(void *_env)
 
     env->nr_cores = smp_cores;
     env->nr_threads = smp_threads;
-    if (kvm_enabled())
+    if (kvm_enabled()) {
         qemu_kvm_start_vcpu(env);
-    else
+    } else {
         qemu_tcg_init_vcpu(env);
+    }
 }
 
 void qemu_notify_event(void)
@@ -1043,16 +1060,18 @@ bool cpu_exec_all(void)
 {
     int r;
 
-    if (next_cpu == NULL)
+    if (next_cpu == NULL) {
         next_cpu = first_cpu;
+    }
     for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
         CPUState *env = next_cpu;
 
         qemu_clock_enable(vm_clock,
                           (env->singlestep_enabled & SSTEP_NOTIMER) == 0);
 
-        if (qemu_alarm_pending())
+        if (qemu_alarm_pending()) {
             break;
+        }
         if (cpu_can_run(env)) {
             r = qemu_cpu_exec(env);
             if (kvm_enabled()) {