Message ID | 20090323172725.GA28775@amt.cnet (mailing list archive) |
---|---|
State | Accepted |
Marcelo Tosatti wrote:
>> Maybe it's best to resync when relinking a global page?
>>
>
> How about this. It will shorten the unsync period of global pages,
> unfortunately.
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 2a36f7f..bccdcc7 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1238,6 +1238,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>  			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
>  			kvm_mmu_mark_parents_unsync(vcpu, sp);
>  		}
> +		if (role.level != PT_PAGE_TABLE_LEVEL &&
> +		    !list_empty(&vcpu->kvm->arch.oos_global_pages))
> +			set_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests);
> +
>  		pgprintk("%s: found\n", __func__);
>  		return sp;
>  	}
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2ea8262..48169d7 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3109,6 +3109,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  			kvm_write_guest_time(vcpu);
>  		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
>  			kvm_mmu_sync_roots(vcpu);
> +		if (test_and_clear_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests))
> +			kvm_mmu_sync_global(vcpu);
>  		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
>  			kvm_x86_ops->tlb_flush(vcpu);
>  		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS

Windows will (I think) write a PDE on every context switch, so this
effectively disables global unsync for that guest.

What about recursively syncing the newly linked page in FNAME(fetch)()?
If the page isn't global, this becomes a no-op, so no new overhead. The
only question is the expense when linking a populated top-level page,
especially in long mode.
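To make the FNAME(fetch) alternative concrete: in the tree of this era,
FNAME(fetch) in arch/x86/kvm/paging_tmpl.h obtains the shadow page to link
via kvm_mmu_get_page(). A minimal sketch of the idea, not a posted patch,
would resync unsync children on the spot with mmu_sync_children(), the same
helper kvm_mmu_sync_roots() already uses; the argument list matches the
2.6.29-era signature, but the exact placement here is an assumption.

	/*
	 * Sketch of "recursively sync the newly linked page".  If nothing
	 * below this page is unsync (the common, non-global case), the
	 * unsync_children check is a cheap no-op; the open question above
	 * is the cost when a populated top-level page is linked.
	 */
	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
				       metaphysical, access, shadow_ent);
	if (shadow_page->unsync_children)
		mmu_sync_children(vcpu, shadow_page);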
On Mon, Mar 23, 2009 at 02:27:25PM -0300, Marcelo Tosatti wrote:
> On Sun, Mar 22, 2009 at 11:35:00AM +0200, Avi Kivity wrote:
> > Good catch, indeed. But is it sufficient? We could unlink a page
> > through other means, for example by the guest zapping a page directory
> > entry.
>
> Yep.
>
> > Maybe it's best to resync when relinking a global page?
>
> How about this. It will shorten the unsync period of global pages,
> unfortunately.

JFYI, it still fixes the problem seen with FreeBSD.

> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 2a36f7f..bccdcc7 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1238,6 +1238,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>  			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
>  			kvm_mmu_mark_parents_unsync(vcpu, sp);
>  		}
> +		if (role.level != PT_PAGE_TABLE_LEVEL &&
> +		    !list_empty(&vcpu->kvm->arch.oos_global_pages))
> +			set_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests);
> +
>  		pgprintk("%s: found\n", __func__);
>  		return sp;
>  	}
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2ea8262..48169d7 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3109,6 +3109,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>  			kvm_write_guest_time(vcpu);
>  		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
>  			kvm_mmu_sync_roots(vcpu);
> +		if (test_and_clear_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests))
> +			kvm_mmu_sync_global(vcpu);
>  		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
>  			kvm_x86_ops->tlb_flush(vcpu);
>  		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 11eb702..8efd6e3 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -37,7 +37,8 @@
>  #define KVM_REQ_PENDING_TIMER 5
>  #define KVM_REQ_UNHALT 6
>  #define KVM_REQ_MMU_SYNC 7
> -#define KVM_REQ_KVMCLOCK_UPDATE 8
> +#define KVM_REQ_MMU_GLOBAL_SYNC 8
> +#define KVM_REQ_KVMCLOCK_UPDATE 9
>
>  #define KVM_USERSPACE_IRQ_SOURCE_ID 0
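For context, the kvm_mmu_sync_global() that the new request bit dispatches
to is not part of the diff. In the tree of this period it amounts to walking
the same kvm->arch.oos_global_pages list the mmu.c hunk checks and resyncing
every entry; a sketch under that assumption follows, with the oos_link
member name assumed rather than quoted from any hunk above.

/*
 * Roughly what kvm_mmu_sync_global() does: resync every out-of-sync
 * global shadow page.  kvm_sync_page() is the mmu.c-local resync
 * helper; the oos_link list-linkage name is an assumption.
 */
void kvm_mmu_sync_global(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *sp, *n;

	list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link)
		kvm_sync_page(vcpu, sp);
}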
On Tue, Mar 24, 2009 at 11:47:33AM +0200, Avi Kivity wrote:
> Marcelo Tosatti wrote:
>>> Maybe it's best to resync when relinking a global page?
>>>
>>
>> How about this. It will shorten the unsync period of global pages,
>> unfortunately.
>>
>> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>> index 2a36f7f..bccdcc7 100644
>> --- a/arch/x86/kvm/mmu.c
>> +++ b/arch/x86/kvm/mmu.c
>> @@ -1238,6 +1238,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
>>  			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
>>  			kvm_mmu_mark_parents_unsync(vcpu, sp);
>>  		}
>> +		if (role.level != PT_PAGE_TABLE_LEVEL &&
>> +		    !list_empty(&vcpu->kvm->arch.oos_global_pages))
>> +			set_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests);
>> +
>>  		pgprintk("%s: found\n", __func__);
>>  		return sp;
>>  	}
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 2ea8262..48169d7 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -3109,6 +3109,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>>  			kvm_write_guest_time(vcpu);
>>  		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
>>  			kvm_mmu_sync_roots(vcpu);
>> +		if (test_and_clear_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests))
>> +			kvm_mmu_sync_global(vcpu);
>>  		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
>>  			kvm_x86_ops->tlb_flush(vcpu);
>>  		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS
>
> Windows will (I think) write a PDE on every context switch, so this
> effectively disables global unsync for that guest.
>
> What about recursively syncing the newly linked page in FNAME(fetch)()?
> If the page isn't global, this becomes a no-op, so no new overhead. The
> only question is the expense when linking a populated top-level page,
> especially in long mode.

Yes, I started doing that but it touches the nice fastpath in fetch().
I'll see if I can come up with something and with numbers.

Thanks
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f..bccdcc7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1238,6 +1238,10 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
 			kvm_mmu_mark_parents_unsync(vcpu, sp);
 		}
+		if (role.level != PT_PAGE_TABLE_LEVEL &&
+		    !list_empty(&vcpu->kvm->arch.oos_global_pages))
+			set_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests);
+
 		pgprintk("%s: found\n", __func__);
 		return sp;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2ea8262..48169d7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3109,6 +3109,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			kvm_write_guest_time(vcpu);
 		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
 			kvm_mmu_sync_roots(vcpu);
+		if (test_and_clear_bit(KVM_REQ_MMU_GLOBAL_SYNC, &vcpu->requests))
+			kvm_mmu_sync_global(vcpu);
 		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 			kvm_x86_ops->tlb_flush(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 11eb702..8efd6e3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -37,7 +37,8 @@
 #define KVM_REQ_PENDING_TIMER 5
 #define KVM_REQ_UNHALT 6
 #define KVM_REQ_MMU_SYNC 7
-#define KVM_REQ_KVMCLOCK_UPDATE 8
+#define KVM_REQ_MMU_GLOBAL_SYNC 8
+#define KVM_REQ_KVMCLOCK_UPDATE 9

 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
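As background on the mechanism the patch extends: each KVM_REQ_* constant is
a bit index into vcpu->requests, which is why KVM_REQ_KVMCLOCK_UPDATE has to
be renumbered to keep the indices unique. Any path may raise a request with
set_bit(); vcpu_enter_guest() then consumes each bit at most once with
test_and_clear_bit() before reentering the guest. The self-contained
userspace model below illustrates that contract only; it is not kernel code,
and GCC __atomic builtins stand in for the kernel's atomic bitops.

#include <stdio.h>

#define KVM_REQ_MMU_SYNC	7
#define KVM_REQ_MMU_GLOBAL_SYNC	8

static unsigned long requests;

/* Model of the kernel's set_bit(): atomically raise one request bit. */
static void set_req(int bit)
{
	__atomic_fetch_or(&requests, 1UL << bit, __ATOMIC_SEQ_CST);
}

/* Model of test_and_clear_bit(): consume a request at most once. */
static int test_and_clear_req(int bit)
{
	unsigned long old = __atomic_fetch_and(&requests, ~(1UL << bit),
					       __ATOMIC_SEQ_CST);
	return (old >> bit) & 1;
}

int main(void)
{
	/* kvm_mmu_get_page() side: flag that a global sync is needed. */
	set_req(KVM_REQ_MMU_GLOBAL_SYNC);

	/* vcpu_enter_guest() side: act on the request once, pre-entry. */
	if (test_and_clear_req(KVM_REQ_MMU_GLOBAL_SYNC))
		printf("would call kvm_mmu_sync_global(vcpu)\n");
	if (test_and_clear_req(KVM_REQ_MMU_GLOBAL_SYNC))
		printf("not reached: the bit was already consumed\n");
	return 0;
}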