From patchwork Thu Jun 18 12:22:28 2009
X-Patchwork-Submitter: Gleb Natapov
X-Patchwork-Id: 31110
Date: Thu, 18 Jun 2009 15:22:28 +0300
From: Gleb Natapov
To: avi@redhat.com
Cc: kvm@vger.kernel.org
Subject: [PATCH] Cleanup cpu loop
Message-ID: <20090618122228.GE20289@redhat.com>

Rearrange the cpu loop to be (hopefully) more readable, and put the
difference between kernel and userspace irqchip handling in one place.

Signed-off-by: Gleb Natapov
---
			Gleb.
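To illustrate the intent of the rearrangement, here is a small standalone model
of the new loop body. It is only a sketch: the struct, the globals and the
printf stubs are stand-ins for the real qemu-kvm state and helpers
(kvm_main_loop_wait, kvm_cpu_exec, kvm_arch_has_work), not actual qemu-kvm code.

	/*
	 * Standalone model of the restructured per-vcpu loop, for illustration
	 * only. All types and helpers are stand-ins, not the real qemu-kvm ones.
	 */
	#include <stdio.h>

	struct cpu_state {
	    int halted;
	    int stopped;
	    int init;
	    int sipi_needed;
	    int has_pending_work;    /* stand-in for kvm_arch_has_work() */
	};

	static int vm_running = 1;
	static int irqchip_in_kernel = 0;    /* userspace irqchip case */

	/* Mirrors the new is_cpu_stopped(): only "may this vcpu run at all?" */
	static int is_cpu_stopped(struct cpu_state *env)
	{
	    return !vm_running || env->stopped;
	}

	/* Mirrors process_irqchip_events(): userspace-irqchip-only bookkeeping */
	static void process_irqchip_events(struct cpu_state *env)
	{
	    if (env->init)
	        printf("INIT: reset regs, cpu stays halted\n");
	    if (env->sipi_needed)
	        printf("SIPI: load start vector\n");
	    if (env->has_pending_work)
	        env->halted = 0;
	}

	/* One iteration of the restructured loop body */
	static void loop_iteration(struct cpu_state *env)
	{
	    int run_cpu = is_cpu_stopped(env) ? 0 : 1;

	    if (run_cpu && !irqchip_in_kernel) {
	        process_irqchip_events(env);
	        run_cpu = !env->halted;    /* a halted vcpu just waits */
	    }

	    /* poll when about to run the guest, sleep up to 1s when idle */
	    printf("wait %s\n", run_cpu ? "0 ms" : "1000 ms");

	    if (run_cpu)
	        printf("enter guest\n");
	}

	int main(void)
	{
	    struct cpu_state env = { .halted = 1, .has_pending_work = 1 };
	    loop_iteration(&env);    /* pending work wakes the halted vcpu */
	    return 0;
	}

With the kernel irqchip, run_cpu stays 1 whenever the vm is running, matching
the old "!env->halted || kvm_irqchip_in_kernel()" condition; the userspace
irqchip path is now the only place that touches env->halted.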
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 4129fe2..72a081d 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -1746,15 +1746,9 @@ int kvm_cpu_exec(CPUState *env)
     return 0;
 }
 
-static int has_work(CPUState *env)
+static int is_cpu_stopped(CPUState *env)
 {
-    if (!vm_running || (env && env->kvm_cpu_state.stopped))
-        return 0;
-    if (kvm_irqchip_in_kernel(kvm_context))
-        return 1;
-    if (!env->halted)
-        return 1;
-    return kvm_arch_has_work(env);
+    return !vm_running || env->kvm_cpu_state.stopped;
 }
 
 static void flush_queued_work(CPUState *env)
@@ -1877,6 +1871,8 @@ static void update_regs_for_init(CPUState *env)
 #endif
 
     cpu_reset(env);
+    /* cpu_reset() clears env->halted, cpu should be halted after init */
+    env->halted = 1;
 
 #ifdef TARGET_I386
     /* restore SIPI vector */
@@ -1920,6 +1916,16 @@ static void qemu_kvm_system_reset(void)
     resume_all_threads();
 }
 
+static void process_irqchip_events(CPUState *env)
+{
+    if (env->kvm_cpu_state.init)
+        update_regs_for_init(env);
+    if (env->kvm_cpu_state.sipi_needed)
+        update_regs_for_sipi(env);
+    if (kvm_arch_has_work(env))
+        env->halted = 0;
+}
+
 static int kvm_main_loop_cpu(CPUState *env)
 {
     setup_kernel_sigmask(env);
@@ -1935,19 +1941,14 @@ static int kvm_main_loop_cpu(CPUState *env)
     kvm_arch_load_regs(env);
 
     while (1) {
-        while (!has_work(env))
-            kvm_main_loop_wait(env, 1000);
-        if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
-            env->halted = 0;
-        if (!kvm_irqchip_in_kernel(kvm_context)) {
-            if (env->kvm_cpu_state.init)
-                update_regs_for_init(env);
-            if (env->kvm_cpu_state.sipi_needed)
-                update_regs_for_sipi(env);
+        int run_cpu = is_cpu_stopped(env) ? 0 : 1;
+        if (run_cpu && !kvm_irqchip_in_kernel(kvm_context)) {
+            process_irqchip_events(env);
+            run_cpu = !env->halted;
         }
-        if (!env->halted || kvm_irqchip_in_kernel(kvm_context))
-            kvm_cpu_exec(env);
-        kvm_main_loop_wait(env, 0);
+        kvm_main_loop_wait(env, run_cpu ? 0 : 1000);
+        if (run_cpu)
+            kvm_cpu_exec(env);
     }
     pthread_mutex_unlock(&qemu_mutex);
     return 0;