
[4/8] KVM: x86: replace hrtimer based timer emulation

Message ID 20090708131721.GA3382@amt.cnet (mailing list archive)
State New, archived

Commit Message

Marcelo Tosatti July 8, 2009, 1:17 p.m. UTC
On Wed, Jul 08, 2009 at 03:58:19PM +0300, Gleb Natapov wrote:
> Excellent patch series.
> 
> On Sun, Jul 05, 2009 at 10:55:15PM -0300, Marcelo Tosatti wrote:
> >  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> >  {
> > -	int ret;
> > +	ktime_t now, expires;
> >  
> > -	ret = pit_has_pending_timer(vcpu);
> > -	ret |= apic_has_pending_timer(vcpu);
> > +	expires = kvm_vcpu_next_timer_event(vcpu);
> > +	now = ktime_get();
> > +	if (expires.tv64 <= now.tv64) {
> > +		if (kvm_arch_interrupt_allowed(vcpu))
> > +			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
> You shouldn't unhalt vcpu here. Not every timer event will generate
> interrupt (vector can be masked in pic/ioapic)

Yeah. Note however that kvm_vcpu_next_timer_event only returns the
expiration time for events that have been acked (for timers that have
had events injected, but not acked it returns KTIME_MAX).

So, the code above will set one spurious unhalt if:

- inject timer irq
- guest acks irq
- guest mask irq
- unhalt (once)

I had a "kvm_timer_mask" callback before (along with the attached
patch), but decided to keep it simpler using the ack trick above.
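
Roughly, the ack trick amounts to the sketch below (apart from can_inject
the names are illustrative; this is not the exact code in the series):

/*
 * A timer whose last event was injected but not yet acked reports
 * KTIME_MAX, so it cannot retrigger the unhalt path until the guest
 * acks the previous interrupt.
 */
static ktime_t kvm_timer_next_event(struct kvm_timer *ktimer)
{
	ktime_t never = { .tv64 = KTIME_MAX };

	if (!ktimer->can_inject)	/* injected, waiting for the ack */
		return never;

	return ktimer->expires;		/* absolute expiration time */
}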

I suppose one spurious unhalt is harmless, or is it a correctness issue?

> or timer event can generate NMI instead of interrupt.

In the NMI case it should not unhalt the processor?

(but yes, bypassing the irq injection system is not a very beautiful
shortcut, but it's done in other places too, e.g. the i8254.c NMI
injection via all CPUs' LINT0).

> Leaving this code out probably means
> that you can't remove kvm_inject_pending_timer_irqs() call from
> __vcpu_run().
> 
> > +		return 1;
> > +	}
> >  
> > -	return ret;
> > +	return 0;
> >  }
> >  EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
> >  
> 
> --
> 			Gleb.

Comments

Gleb Natapov July 8, 2009, 1:39 p.m. UTC | #1
On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> On Wed, Jul 08, 2009 at 03:58:19PM +0300, Gleb Natapov wrote:
> > Excellent patch series.
> > 
> > On Sun, Jul 05, 2009 at 10:55:15PM -0300, Marcelo Tosatti wrote:
> > >  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> > >  {
> > > -	int ret;
> > > +	ktime_t now, expires;
> > >  
> > > -	ret = pit_has_pending_timer(vcpu);
> > > -	ret |= apic_has_pending_timer(vcpu);
> > > +	expires = kvm_vcpu_next_timer_event(vcpu);
> > > +	now = ktime_get();
> > > +	if (expires.tv64 <= now.tv64) {
> > > +		if (kvm_arch_interrupt_allowed(vcpu))
> > > +			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
> > You shouldn't unhalt vcpu here. Not every timer event will generate
> > interrupt (vector can be masked in pic/ioapic)
> 
> Yeah. Note however that kvm_vcpu_next_timer_event only returns the
> expiration time for events that have been acked (for timers that have
> had events injected, but not acked it returns KTIME_MAX).
> 
> So, the code above will set one spurious unhalt if:
> 
> - inject timer irq
> - guest acks irq
> - guest mask irq
> - unhalt (once)
> 
> I had a "kvm_timer_mask" callback before (along with the attached
> patch), but decided to keep it simpler using the ack trick above.
> 
> I suppose one spurious unhalt is harmless, or is it a correctness issue?
> 
This is a correctness issue. We should be as close to a real CPU as
possible. This will save us many hours of debugging later :)

> > or timer event can generate NMI instead of interrupt.
> 
> In the NMI case it should not unhalt the processor?
Why? It should. It should jump to NMI handler.

> 
> (but yes, bypassing the irq injection system is not a very beautiful
> shortcut, but it's done in other places too, e.g. the i8254.c NMI
> injection via all CPUs' LINT0).
> 
> > Leaving this code out probably means
> > that you can't remove kvm_inject_pending_timer_irqs() call from
> > __vcpu_run().
> > 
> > > +		return 1;
> > > +	}
> > >  
> > > -	return ret;
> > > +	return 0;
> > >  }
> > >  EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
> > >  
> > 
> > --
> > 			Gleb.

> Index: kvm/arch/x86/kvm/i8259.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/i8259.c
> +++ kvm/arch/x86/kvm/i8259.c
> @@ -122,6 +122,18 @@ static inline int get_priority(struct kv
>  	return priority;
>  }
>  
> +int pic_int_is_masked(struct kvm *kvm, int irq)
> +{
> +	u8 imr;
> +	struct kvm_pic *s = pic_irqchip(kvm);
> +
> +	pic_lock(s);
> +	imr = s->pics[SELECT_PIC(irq)].imr;
> +	pic_unlock(s);
> +
> +	return (imr & (1 << (irq & 7)));
> +}
> +
>  /*
>   * return the pic wanted interrupt. return -1 if none
>   */
> Index: kvm/arch/x86/kvm/irq.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/irq.c
> +++ kvm/arch/x86/kvm/irq.c
> @@ -42,6 +42,29 @@ int kvm_cpu_has_pending_timer(struct kvm
>  EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
>  
>  /*
> + * The PIT interrupt can be masked on different levels: PIC/IOAPIC/LAPIC.
> + * Hide the maze from the PIT mask notifier, all it cares about is whether
> + * the interrupt can reach the processor.
> + */
> +void kvm_pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
> +{
> +	bool masked = 0;
> +	struct kvm *kvm;
> +	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
> +
> +	kvm = pit->kvm;
> +
> +	/* LAPIC hardware disabled or INTIN0 programmed as ExtINT */
> +	if (kvm_apic_accept_pic_intr(kvm->vcpus[0])) {
> +		masked = pic_int_is_masked(kvm, 0);
> +	/* IOAPIC -> LAPIC */
> +	} else
> +		masked = ioapic_int_is_masked(kvm, 0);
> +
> +	kvm_pit_internal_mask_notifier(kvm, masked);
> +}
> +
> +/*
>   * check if there is pending interrupt without
>   * intack.
>   */
> Index: kvm/virt/kvm/ioapic.c
> ===================================================================
> --- kvm.orig/virt/kvm/ioapic.c
> +++ kvm/virt/kvm/ioapic.c
> @@ -101,6 +101,16 @@ static int ioapic_service(struct kvm_ioa
>  	return injected;
>  }
>  
> +int ioapic_int_is_masked(struct kvm *kvm, int pin)
> +{
> +	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
> +
> +	if (!ioapic)
> +		return 1;
> +
> +	return ioapic->redirtbl[pin].fields.mask;
> +}
> +
>  static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
>  {
>  	unsigned index;
> Index: kvm/arch/x86/kvm/i8254.c
> ===================================================================
> --- kvm.orig/arch/x86/kvm/i8254.c
> +++ kvm/arch/x86/kvm/i8254.c
> @@ -558,9 +558,9 @@ void kvm_pit_reset(struct kvm_pit *pit)
>  	pit->pit_state.irq_ack = 1;
>  }
>  
> -static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
> +void kvm_pit_internal_mask_notifier(struct kvm *kvm, bool mask)
>  {
> -	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
> +	struct kvm_pit *pit = kvm->arch.vpit;
>  
>  	if (!mask) {
>  		atomic_set(&pit->pit_state.pit_timer.pending, 0);
> @@ -614,8 +614,8 @@ struct kvm_pit *kvm_create_pit(struct kv
>  
>  	kvm_pit_reset(pit);
>  
> -	pit->mask_notifier.func = pit_mask_notifer;
> -	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
> +	pit->mask_notifier.func = kvm_pit_mask_notifier;
> +	kvm_register_irq_mask_notifier(kvm, 0, &kvm->arch.vpit->mask_notifier);
>  
>  	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
>  	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
> Index: kvm/arch/x86/kvm/irq.h
> ===================================================================
> --- kvm.orig/arch/x86/kvm/irq.h
> +++ kvm/arch/x86/kvm/irq.h
> @@ -98,7 +98,13 @@ void __kvm_migrate_apic_timer(struct kvm
>  void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
>  void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
>  
> +void kvm_pit_internal_mask_notifier(struct kvm *kvm, bool mask);
> +void kvm_pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask);
> +
>  int pit_has_pending_timer(struct kvm_vcpu *vcpu);
>  int apic_has_pending_timer(struct kvm_vcpu *vcpu);
>  
> +int pic_int_is_masked(struct kvm *kvm, int irq);
> +int ioapic_int_is_masked(struct kvm *kvm, int irq);
> +
>  #endif


--
			Gleb.
Marcelo Tosatti July 8, 2009, 3:42 p.m. UTC | #2
On Wed, Jul 08, 2009 at 04:39:56PM +0300, Gleb Natapov wrote:
> On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> > On Wed, Jul 08, 2009 at 03:58:19PM +0300, Gleb Natapov wrote:
> > > Excellent patch series.
> > > 
> > > On Sun, Jul 05, 2009 at 10:55:15PM -0300, Marcelo Tosatti wrote:
> > > >  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> > > >  {
> > > > -	int ret;
> > > > +	ktime_t now, expires;
> > > >  
> > > > -	ret = pit_has_pending_timer(vcpu);
> > > > -	ret |= apic_has_pending_timer(vcpu);
> > > > +	expires = kvm_vcpu_next_timer_event(vcpu);
> > > > +	now = ktime_get();
> > > > +	if (expires.tv64 <= now.tv64) {
> > > > +		if (kvm_arch_interrupt_allowed(vcpu))
> > > > +			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
> > > You shouldn't unhalt vcpu here. Not every timer event will generate
> > > interrupt (vector can be masked in pic/ioapic)
> > 
> > Yeah. Note however that kvm_vcpu_next_timer_event only returns the
> > expiration time for events that have been acked (for timers that have
> > had events injected, but not acked it returns KTIME_MAX).
> > 
> > So, the code above will set one spurious unhalt if:
> > 
> > - inject timer irq
> > - guest acks irq
> > - guest mask irq
> > - unhalt (once)
> > 
> > I had a "kvm_timer_mask" callback before (along with the attached
> > patch), but decided to keep it simpler using the ack trick above.
> > 
> > I suppose one spurious unhalt is harmless, or is it a correctness issue?
> > 
> This is a correctness issue. We should be as close to a real CPU as
> possible. This will save us many hours of debugging later :)

Hum, fine. Will update the kvm_timer_mask patch below and let you know.

> > > or timer event can generate NMI instead of interrupt.
> > 
> > In the NMI case it should not unhalt the processor?
> Why? It should. It should jump to NMI handler.

I meant unhalt as in KVM_REQ_UNHALT so vcpu_enter_guest runs.

What did you mention about ISR/IRR again?

Gleb Natapov July 8, 2009, 4:13 p.m. UTC | #3
On Wed, Jul 08, 2009 at 12:42:52PM -0300, Marcelo Tosatti wrote:
> On Wed, Jul 08, 2009 at 04:39:56PM +0300, Gleb Natapov wrote:
> > On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> > > On Wed, Jul 08, 2009 at 03:58:19PM +0300, Gleb Natapov wrote:
> > > > Excellent patch series.
> > > > 
> > > > On Sun, Jul 05, 2009 at 10:55:15PM -0300, Marcelo Tosatti wrote:
> > > > >  int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
> > > > >  {
> > > > > -	int ret;
> > > > > +	ktime_t now, expires;
> > > > >  
> > > > > -	ret = pit_has_pending_timer(vcpu);
> > > > > -	ret |= apic_has_pending_timer(vcpu);
> > > > > +	expires = kvm_vcpu_next_timer_event(vcpu);
> > > > > +	now = ktime_get();
> > > > > +	if (expires.tv64 <= now.tv64) {
> > > > > +		if (kvm_arch_interrupt_allowed(vcpu))
> > > > > +			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
> > > > You shouldn't unhalt vcpu here. Not every timer event will generate
> > > > interrupt (vector can be masked in pic/ioapic)
> > > 
> > > Yeah. Note however that kvm_vcpu_next_timer_event only returns the
> > > expiration time for events that have been acked (for timers that have
> > > had events injected, but not acked it returns KTIME_MAX).
> > > 
> > > So, the code above will set one spurious unhalt if:
> > > 
> > > - inject timer irq
> > > - guest acks irq
> > > - guest mask irq
> > > - unhalt (once)
> > > 
> > > I had a "kvm_timer_mask" callback before (along with the attached
> > > patch), but decided to keep it simpler using the ack trick above.
> > > 
> > > I suppose one spurious unhalt is harmless, or is it a correctness issue?
> > > 
> > This is a correctness issue. We should be as close to a real CPU as
> > possible. This will save us many hours of debugging later :)
> 
> Hum, fine. Will update the kvm_timer_mask patch below and let you know.
> 
> > > > or timer event can generate NMI instead of interrupt.
> > > 
> > > In the NMI case it should not unhalt the processor?
> > Why? It should. It should jump to NMI handler.
> 
> I meant unhalt as in KVM_REQ_UNHALT so vcpu_enter_guest runs.
> 
Yes, it should. Inside vcpu_enter_guest() the NMI will be injected
and the NMI handler will be executed.

> What did you mention about ISR/IRR again?
On real HW the following may happen:
 - a timer interrupt is delivered to the APIC and placed into IRR;
 - the timer interrupt is delivered to the CPU and moved from IRR to ISR;
 - a new timer interrupt is delivered to the APIC and placed into IRR
   before the previous one is acked.

In your patch ktimer->can_inject is set to false when the timer event is
injected, and the next interrupt is injected only after the OS acks the
previous one, so the situation above cannot happen. I don't know if this
is important or not. It is possible to write code that works only with
the former behaviour, but I don't see why anybody would want to do that.
We could emulate the former behaviour, though, by not relying on acks to
count delivered events and instead making the ->inject callback return a
status indicating whether the interrupt was delivered to the APIC or not.
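
Roughly something like this (a sketch only; the names are made up and
this is not code from your series):

/*
 * Track delivery instead of acks: the ->inject callback reports
 * whether the interrupt actually reached the APIC's IRR, so a second
 * timer interrupt may be queued into the IRR before the first one is
 * acked, just as on real hardware.
 */
static void kvm_timer_tick(struct kvm_timer *ktimer)
{
	if (!ktimer->inject(ktimer))		/* did it reach the IRR? */
		atomic_inc(&ktimer->pending);	/* no: retry on next entry */
	/* yes: nothing to remember, the next event may be delivered
	 * before this one is acked */
}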

--
			Gleb.
Gleb Natapov July 8, 2009, 4:29 p.m. UTC | #4
On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> (but yes, bypassing the irq injection system is not a very beautiful
> shortcut, but it's done in other places too, e.g. the i8254.c NMI
> injection via all CPUs' LINT0).
> 
I've looked at this. Why do you say the i8254.c NMI injection bypasses the
irq injection system? It looks to me like it uses the usual way to send an
interrupt to all CPUs.

--
			Gleb.
Marcelo Tosatti July 8, 2009, 4:37 p.m. UTC | #5
On Wed, Jul 08, 2009 at 07:29:58PM +0300, Gleb Natapov wrote:
> On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> > (but yes, bypassing the irq injection system is not a very beautiful
> > shortcut, but it's done in other places too, e.g. the i8254.c NMI
> > injection via all CPUs' LINT0).
> > 
> I've looked at this. Why do you say the i8254.c NMI injection bypasses the
> irq injection system? It looks to me like it uses the usual way to send an
> interrupt to all CPUs.

It goes through apic_accept_irq so it's somewhat fine. But it accesses
the LAPICs of other vcpus locklessly (which does not happen via the
kvm_set_irq path, due to irq_lock protection).
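
For reference, the LINT0 path in question looks roughly like this (an
abbreviated sketch, not the exact i8254.c code):

/*
 * Each vcpu's local APIC is poked directly, without going through
 * kvm_set_irq() or taking irq_lock.
 */
static void pit_inject_nmi(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		/* reads LVT0 and delivers an NMI if it is programmed so */
		kvm_apic_nmi_wd_deliver(vcpu);
	}
}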

Gleb Natapov July 8, 2009, 4:40 p.m. UTC | #6
On Wed, Jul 08, 2009 at 01:37:39PM -0300, Marcelo Tosatti wrote:
> On Wed, Jul 08, 2009 at 07:29:58PM +0300, Gleb Natapov wrote:
> > On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> > > (but yes, bypassing the irq injection system is not a very beautiful
> > > shortcut, but it's done in other places too, e.g. the i8254.c NMI
> > > injection via all CPUs' LINT0).
> > > 
> > I've looked at this. Why do you say the i8254.c NMI injection bypasses the
> > irq injection system? It looks to me like it uses the usual way to send an
> > interrupt to all CPUs.
> 
> It goes through apic_accept_irq so it's somewhat fine. But it accesses
> the LAPICs of other vcpus locklessly (which does not happen via the
> kvm_set_irq path, due to irq_lock protection).
Ah, that's the bug then. But otherwise it goes through the usual interrupt
logic. Do you know why the lock is missing? Due to a potential deadlock, or
did we just forget to lock?

--
			Gleb.
Marcelo Tosatti July 8, 2009, 4:47 p.m. UTC | #7
On Wed, Jul 08, 2009 at 07:40:57PM +0300, Gleb Natapov wrote:
> On Wed, Jul 08, 2009 at 01:37:39PM -0300, Marcelo Tosatti wrote:
> > On Wed, Jul 08, 2009 at 07:29:58PM +0300, Gleb Natapov wrote:
> > > On Wed, Jul 08, 2009 at 10:17:21AM -0300, Marcelo Tosatti wrote:
> > > > (but yes, bypassing the irq injection system is not a very beautiful
> > > > shortcut, but it's done in other places too, e.g. the i8254.c NMI
> > > > injection via all CPUs' LINT0).
> > > > 
> > > I've looked at this. Why do you say the i8254.c NMI injection bypasses the
> > > irq injection system? It looks to me like it uses the usual way to send an
> > > interrupt to all CPUs.
> > 
> > It goes through apic_accept_irq so it's somewhat fine. But it accesses
> > the LAPICs of other vcpus locklessly (which does not happen via the
> > kvm_set_irq path, due to irq_lock protection).
> Ah, that's the bug then. But otherwise it goes through the usual interrupt
> logic. Do you know why the lock is missing? Due to a potential deadlock, or
> did we just forget to lock?

There is no lock to protect the LAPICs, right; normally ISR setting is
lockless, which is fine, but kvm_apic_local_deliver also reads a
register. Hum, should be fine though.
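
(For reference, the local-deliver path is roughly the following; this is
an abbreviated sketch, not the exact lapic.c code, and the LVT0 read is
the lockless register access in question:)

static int apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = apic_get_reg(apic, lvt_type);	/* the lockless read */
	int vector, mode, trig_mode;

	if (reg & APIC_LVT_MASKED)
		return 0;

	vector = reg & APIC_VECTOR_MASK;
	mode = reg & APIC_MODE_MASK;
	trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
	return __apic_accept_irq(apic, mode, vector, 1, trig_mode);
}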


Patch

Index: kvm/arch/x86/kvm/i8259.c
===================================================================
--- kvm.orig/arch/x86/kvm/i8259.c
+++ kvm/arch/x86/kvm/i8259.c
@@ -122,6 +122,18 @@  static inline int get_priority(struct kv
 	return priority;
 }
 
+int pic_int_is_masked(struct kvm *kvm, int irq)
+{
+	u8 imr;
+	struct kvm_pic *s = pic_irqchip(kvm);
+
+	pic_lock(s);
+	imr = s->pics[SELECT_PIC(irq)].imr;
+	pic_unlock(s);
+
+	return (imr & (1 << (irq & 7)));
+}
+
 /*
  * return the pic wanted interrupt. return -1 if none
  */
Index: kvm/arch/x86/kvm/irq.c
===================================================================
--- kvm.orig/arch/x86/kvm/irq.c
+++ kvm/arch/x86/kvm/irq.c
@@ -42,6 +42,29 @@  int kvm_cpu_has_pending_timer(struct kvm
 EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
 
 /*
+ * The PIT interrupt can be masked on different levels: PIC/IOAPIC/LAPIC.
+ * Hide the maze from the PIT mask notifier, all it cares about is whether
+ * the interrupt can reach the processor.
+ */
+void kvm_pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
+{
+	bool masked = 0;
+	struct kvm *kvm;
+	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
+
+	kvm = pit->kvm;
+
+	/* LAPIC hardware disabled or INTIN0 programmed as ExtINT */
+	if (kvm_apic_accept_pic_intr(kvm->vcpus[0])) {
+		masked = pic_int_is_masked(kvm, 0);
+	/* IOAPIC -> LAPIC */
+	} else
+		masked = ioapic_int_is_masked(kvm, 0);
+
+	kvm_pit_internal_mask_notifier(kvm, masked);
+}
+
+/*
  * check if there is pending interrupt without
  * intack.
  */
Index: kvm/virt/kvm/ioapic.c
===================================================================
--- kvm.orig/virt/kvm/ioapic.c
+++ kvm/virt/kvm/ioapic.c
@@ -101,6 +101,16 @@  static int ioapic_service(struct kvm_ioa
 	return injected;
 }
 
+int ioapic_int_is_masked(struct kvm *kvm, int pin)
+{
+	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+	if (!ioapic)
+		return 1;
+
+	return ioapic->redirtbl[pin].fields.mask;
+}
+
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
 	unsigned index;
Index: kvm/arch/x86/kvm/i8254.c
===================================================================
--- kvm.orig/arch/x86/kvm/i8254.c
+++ kvm/arch/x86/kvm/i8254.c
@@ -558,9 +558,9 @@  void kvm_pit_reset(struct kvm_pit *pit)
 	pit->pit_state.irq_ack = 1;
 }
 
-static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
+void kvm_pit_internal_mask_notifier(struct kvm *kvm, bool mask)
 {
-	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
+	struct kvm_pit *pit = kvm->arch.vpit;
 
 	if (!mask) {
 		atomic_set(&pit->pit_state.pit_timer.pending, 0);
@@ -614,8 +614,8 @@  struct kvm_pit *kvm_create_pit(struct kv
 
 	kvm_pit_reset(pit);
 
-	pit->mask_notifier.func = pit_mask_notifer;
-	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+	pit->mask_notifier.func = kvm_pit_mask_notifier;
+	kvm_register_irq_mask_notifier(kvm, 0, &kvm->arch.vpit->mask_notifier);
 
 	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
 	kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
Index: kvm/arch/x86/kvm/irq.h
===================================================================
--- kvm.orig/arch/x86/kvm/irq.h
+++ kvm/arch/x86/kvm/irq.h
@@ -98,7 +98,13 @@  void __kvm_migrate_apic_timer(struct kvm
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
 void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
 
+void kvm_pit_internal_mask_notifier(struct kvm *kvm, bool mask);
+void kvm_pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask);
+
 int pit_has_pending_timer(struct kvm_vcpu *vcpu);
 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
 
+int pic_int_is_masked(struct kvm *kvm, int irq);
+int ioapic_int_is_masked(struct kvm *kvm, int irq);
+
 #endif