
[4/6] Handle vcpu init/sipi by calling a function on vcpu

Message ID 1244976742-22926-4-git-send-email-gleb@redhat.com (mailing list archive)
State New, archived

Commit Message

Gleb Natapov June 14, 2009, 10:52 a.m. UTC
Instead of having a special case in the vcpu event loop.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
 cpu-defs.h |    2 --
 qemu-kvm.c |   51 +++++++++++++++++----------------------------------
 2 files changed, 17 insertions(+), 36 deletions(-)

Comments

Avi Kivity June 15, 2009, 10:03 a.m. UTC | #1
On 06/14/2009 01:52 PM, Gleb Natapov wrote:
> Instead of having a special case in the vcpu event loop.
>
>    

I'm a little worried about two vcpus INITing each other simultaneously 
and deadlocking.  INIT/SIPI are async events, the initiator should not 
wait for them.
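
To make the hazard concrete, here is a minimal, self-contained model of it in
plain pthreads. It is not qemu-kvm code, only an illustration of two threads
that each issue a synchronous "run this on the peer and wait" call before
reaching the loop that would service requests queued on themselves:

/* Toy model of the deadlock, not qemu-kvm code: each thread makes a
 * synchronous cross-call (a stand-in for delivering INIT via on_vcpu())
 * before it reaches the loop that services requests queued on itself. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

struct worker {
    bool pending;   /* a request has been queued on this worker  */
    bool done;      /* the request queued on this worker is done */
} workers[2];

/* Synchronous cross-call: queue a request on the target and wait for it. */
static void call_on(struct worker *target)
{
    pthread_mutex_lock(&lock);
    target->pending = true;
    target->done = false;
    pthread_cond_broadcast(&cond);
    while (!target->done)                      /* the caller blocks here */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

static void *vcpu_thread(void *arg)
{
    struct worker *self = arg, *peer = &workers[self == &workers[0]];

    call_on(peer);     /* both threads block here at the same time... */

    /* ...so neither ever reaches the loop below, the only place where
     * the request the peer queued on it would be serviced: deadlock. */
    pthread_mutex_lock(&lock);
    while (!self->pending)
        pthread_cond_wait(&cond, &lock);
    self->pending = false;
    self->done = true;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, vcpu_thread, &workers[0]);
    pthread_create(&b, NULL, vcpu_thread, &workers[1]);
    pthread_join(a, NULL);    /* never returns: both threads are stuck */
    pthread_join(b, NULL);
    return 0;
}

In qemu-kvm terms, call_on() stands in for on_vcpu() and the lower half of
vcpu_thread() for the point in the vcpu loop where queued work runs; the worry
is that kvm_apic_init() calling on_vcpu() on another vcpu could put two vcpu
threads into the top half at once.
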
Gleb Natapov June 15, 2009, 10:11 a.m. UTC | #2
On Mon, Jun 15, 2009 at 01:03:39PM +0300, Avi Kivity wrote:
> On 06/14/2009 01:52 PM, Gleb Natapov wrote:
>> Instead of having a special case in the vcpu event loop.
>>
>>    
>
> I'm a little worried about two vcpus INITing each other simultaneously  
> and deadlocking.  INIT/SIPI are async events, the initiator should not  
> wait for them.
>
I thought of adding on_vcpu_async() for that (if this case is worth worrying about).

--
			Gleb.
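
For illustration, an on_vcpu_async() along the lines Gleb suggests might look
roughly like the sketch below. It assumes the same work-item idea that
on_vcpu() is built on; queue_work_on_vcpu() and kick_vcpu_thread() are made-up
placeholder names, and the qemu_mallocz() call is the allocation that Avi
objects to in the next message:

/* Hypothetical sketch only, not part of the posted patch: queue func on
 * env's vcpu thread and return without waiting for it to run.  Because
 * the caller does not wait, the work item cannot live on its stack and
 * has to be heap-allocated; the target vcpu thread would free it after
 * calling func(data). */
struct async_work_item {
    void (*func)(void *data);
    void *data;
    struct async_work_item *next;
};

static void on_vcpu_async(CPUState *env, void (*func)(void *data), void *data)
{
    struct async_work_item *wi = qemu_mallocz(sizeof(*wi));

    wi->func = func;
    wi->data = data;
    queue_work_on_vcpu(env, wi);   /* placeholder: append to env's work list */
    kick_vcpu_thread(env);         /* placeholder: wake up env's vcpu thread */
}

kvm_apic_init() and kvm_update_after_sipi() could then use this instead of
on_vcpu() so the initiating vcpu never blocks on the target.
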
Avi Kivity June 15, 2009, 10:14 a.m. UTC | #3
On 06/15/2009 01:11 PM, Gleb Natapov wrote:
> On Mon, Jun 15, 2009 at 01:03:39PM +0300, Avi Kivity wrote:
>    
>> On 06/14/2009 01:52 PM, Gleb Natapov wrote:
>>      
>>> Instead of having a special case in the vcpu event loop.
>>>
>>>
>>>        
>> I'm a little worried about two vcpus INITing each other simultaneously
>> and deadlocking.  INIT/SIPI are async events, the initiator should not
>> wait for them.
>>
>>      
> I thought of adding on_vcpu_async() for that (if this case is worth worrying about).
>    

A generic on_vcpu_async() would need to allocate; that might be exploitable.
Gleb Natapov June 15, 2009, 10:16 a.m. UTC | #4
On Mon, Jun 15, 2009 at 01:14:21PM +0300, Avi Kivity wrote:
> On 06/15/2009 01:11 PM, Gleb Natapov wrote:
>> On Mon, Jun 15, 2009 at 01:03:39PM +0300, Avi Kivity wrote:
>>    
>>> On 06/14/2009 01:52 PM, Gleb Natapov wrote:
>>>      
>>>> Instead of having a special case in the vcpu event loop.
>>>>
>>>>
>>>>        
>>> I'm a little worried about two vcpus INITing each other simultaneously
>>> and deadlocking.  INIT/SIPI are async events, the initiator should not
>>> wait for them.
>>>
>>>      
>> I thought of adding on_vcpu_async() for that (if this case is worth worrying about).
>>    
>
> A generic on_vcpu_async() would need to allocate; that might be exploitable.
>
Then what about processing events while waiting in on_vcpu()?

--
			Gleb.
Avi Kivity June 15, 2009, 10:26 a.m. UTC | #5
On 06/15/2009 01:16 PM, Gleb Natapov wrote:
>> A generic on_vcpu_async() would need to allocate; that might be exploitable.
>>
>>      
> Then what about processing events while waiting in on_vcpu()?
>
>    

That could work, but I'd prefer a simpler solution.
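
For reference, Gleb's alternative maps onto the toy model above roughly as
follows: while the caller waits for its own cross-call to finish, it also
services any request the peer has queued on it, so two simultaneous calls
resolve each other instead of deadlocking. This is only a sketch of the idea,
not qemu-kvm code:

/* Drop-in replacement for call_on() in the toy model above (still not
 * qemu-kvm code): while waiting for our own request to complete, also
 * service any request the peer has queued on us. */
static void call_on_draining(struct worker *self, struct worker *target)
{
    pthread_mutex_lock(&lock);
    target->pending = true;
    target->done = false;
    pthread_cond_broadcast(&cond);
    while (!target->done) {
        if (self->pending) {            /* service work queued on us */
            self->pending = false;
            self->done = true;
            pthread_cond_broadcast(&cond);
            continue;
        }
        pthread_cond_wait(&cond, &lock);
    }
    pthread_mutex_unlock(&lock);
}

With this, two threads that cross-call each other at the same time both make
progress: each handles the peer's request from inside its own wait loop. The
event-loop step in vcpu_thread() would then need an "if (self->pending)" check
rather than an unconditional wait, since its request may already have been
handled.
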

Patch

diff --git a/cpu-defs.h b/cpu-defs.h
index e17209a..7570096 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -140,8 +140,6 @@  typedef struct CPUWatchpoint {
 struct qemu_work_item;
 
 struct KVMCPUState {
-    int sipi_needed;
-    int init;
     pthread_t thread;
     int signalled;
     int stop;
diff --git a/qemu-kvm.c b/qemu-kvm.c
index 5fa7154..af3fd91 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -134,19 +134,6 @@  void kvm_update_interrupt_request(CPUState *env)
     }
 }
 
-void kvm_update_after_sipi(CPUState *env)
-{
-    env->kvm_cpu_state.sipi_needed = 1;
-    kvm_update_interrupt_request(env);
-}
-
-void kvm_apic_init(CPUState *env)
-{
-    if (env->cpu_index != 0)
-	env->kvm_cpu_state.init = 1;
-    kvm_update_interrupt_request(env);
-}
-
 #include <signal.h>
 
 static int kvm_try_push_interrupts(void *opaque)
@@ -331,30 +318,32 @@  static void kvm_vm_state_change_handler(void *context, int running, int reason)
 	pause_all_threads();
 }
 
-static void update_regs_for_sipi(CPUState *env)
+static void update_regs_for_sipi(void *data)
 {
-    kvm_arch_update_regs_for_sipi(env);
-    env->kvm_cpu_state.sipi_needed = 0;
+    kvm_arch_update_regs_for_sipi(data);
 }
 
-static void update_regs_for_init(CPUState *env)
+void kvm_update_after_sipi(CPUState *env)
 {
-#ifdef TARGET_I386
-    SegmentCache cs = env->segs[R_CS];
-#endif
-
-    cpu_reset(env);
+    on_vcpu(env, update_regs_for_sipi, env);
+}
 
-#ifdef TARGET_I386
-    /* restore SIPI vector */
-    if(env->kvm_cpu_state.sipi_needed)
-        env->segs[R_CS] = cs;
-#endif
+static void update_regs_for_init(void *data)
+{
+    CPUState *env = data;
 
-    env->kvm_cpu_state.init = 0;
     kvm_arch_load_regs(env);
 }
 
+void kvm_apic_init(CPUState *env)
+{
+    if (env->cpu_index != 0) {
+        if (env->kvm_cpu_state.created)
+            on_vcpu(env, update_regs_for_init, env);
+    } else
+        kvm_update_interrupt_request(env);
+}
+
 static void setup_kernel_sigmask(CPUState *env)
 {
     sigset_t set;
@@ -406,12 +395,6 @@  static int kvm_main_loop_cpu(CPUState *env)
 	    kvm_main_loop_wait(env, 1000);
 	if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
 	    env->halted = 0;
-    if (!kvm_irqchip_in_kernel(kvm_context)) {
-	    if (env->kvm_cpu_state.init)
-	        update_regs_for_init(env);
-	    if (env->kvm_cpu_state.sipi_needed)
-	        update_regs_for_sipi(env);
-    }
 	if (!env->halted || kvm_irqchip_in_kernel(kvm_context))
 	    kvm_cpu_exec(env);
 	env->exit_request = 0;