Message ID | 20130624124415.27508.74230.sendpatchset@codeblue.in.ibm.com (mailing list archive) |
---|---|
State | New, archived |
On Mon, Jun 24, 2013 at 06:14:15PM +0530, Raghavendra K T wrote:
> kvm hypervisor: Add directed yield in vcpu block path
>
> From: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
>
> We use the improved PLE handler logic in the vcpu block path for
> scheduling rather than plain schedule, so that we can make
> intelligent decisions.
>
What kind of improvement does this provide? Doesn't it screw up our pause
exit heuristics?

> Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
> ---
>  arch/ia64/include/asm/kvm_host.h    |    5 +++++
>  arch/powerpc/include/asm/kvm_host.h |    5 +++++
>  arch/s390/include/asm/kvm_host.h    |    5 +++++
>  arch/x86/include/asm/kvm_host.h     |    2 +-
>  arch/x86/kvm/x86.c                  |    8 ++++++++
>  include/linux/kvm_host.h            |    2 +-
>  virt/kvm/kvm_main.c                 |    6 ++++--
This misses some arches.

>  7 files changed, 29 insertions(+), 4 deletions(-)
>
> [...]

--
			Gleb.
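Gleb's "pause exit heuristics" question is about the directed-yield bookkeeping that kvm_vcpu_on_spin() already performs after a PLE exit: each vcpu tracks whether it is itself spinning and whether it may be picked as a yield target (the dy_eligible flag that the last hunk of the patch touches). Below is a minimal stand-alone sketch of that eligibility idea, for illustration only; the struct, function name and driver are invented for the example and are not the kernel code.

#include <stdbool.h>
#include <stdio.h>

struct model_vcpu {
	bool in_spin_loop;	/* set while the vcpu sits in a PLE/halt loop */
	bool dy_eligible;	/* may this vcpu be chosen as a yield target? */
};

/* A candidate that is itself spinning and not yet marked eligible is
 * skipped, so boosting keeps targeting vcpus with useful work to do. */
static bool eligible_for_directed_yield(const struct model_vcpu *v)
{
	return !v->in_spin_loop || v->dy_eligible;
}

int main(void)
{
	struct model_vcpu spinner = { .in_spin_loop = true,  .dy_eligible = false };
	struct model_vcpu runner  = { .in_spin_loop = false, .dy_eligible = false };

	printf("yield to spinner? %d\n", eligible_for_directed_yield(&spinner)); /* 0 */
	printf("yield to runner?  %d\n", eligible_for_directed_yield(&runner));  /* 1 */
	return 0;
}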
On 07/14/2013 07:48 PM, Gleb Natapov wrote:
> On Mon, Jun 24, 2013 at 06:14:15PM +0530, Raghavendra K T wrote:
>> kvm hypervisor: Add directed yield in vcpu block path
>>
>> From: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
>>
>> We use the improved PLE handler logic in the vcpu block path for
>> scheduling rather than plain schedule, so that we can make
>> intelligent decisions.
>>
> What kind of improvement does this provide? Doesn't it screw up our pause
> exit heuristics?

No, it does not affect them negatively from what I saw (and even from the
results). It makes the PLE handler avoid yield_to to vcpus that are still
looping in the halt handler. Vcpus which are pv_unhalted but not yet running
may miss this, but I haven't seen anything bad happen because of that. Yes,
I am planning to do some experiments around this; thanks for bringing it up.

By the way, another good point about this is that, though non-PLE hardware is
becoming obsolete, it can potentially take advantage of the PLE handler.

>
>> Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
>> ---
>>  arch/ia64/include/asm/kvm_host.h    |    5 +++++
>>  arch/powerpc/include/asm/kvm_host.h |    5 +++++
>>  arch/s390/include/asm/kvm_host.h    |    5 +++++
>>  arch/x86/include/asm/kvm_host.h     |    2 +-
>>  arch/x86/kvm/x86.c                  |    8 ++++++++
>>  include/linux/kvm_host.h            |    2 +-
>>  virt/kvm/kvm_main.c                 |    6 ++++--
> This misses some arches.

Thanks for pointing that out. arm and mips were not around at V8 time, so I
missed them.
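For completeness, the arches Gleb points out as missing would presumably need only the same plain-schedule() fallback stub that this patch adds for ia64, powerpc and s390. A sketch of such a stub follows; the file placement (e.g. arch/arm/include/asm/kvm_host.h) is an assumption and this hunk is not part of the posted series:

static inline void kvm_do_schedule(struct kvm_vcpu *vcpu)
{
	/* No directed yield wired up on this arch yet: fall back to schedule(). */
	schedule();
}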
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 989dd3f..999ab15 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -595,6 +595,11 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+static inline void kvm_do_schedule(struct kvm_vcpu *vcpu)
+{
+	schedule();
+}
+
 #define __KVM_HAVE_ARCH_VM_ALLOC 1
 struct kvm *kvm_arch_alloc_vm(void);
 void kvm_arch_free_vm(struct kvm *kvm);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af326cd..1aeecc0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -628,4 +628,9 @@ struct kvm_vcpu_arch {
 #define __KVM_HAVE_ARCH_WQP
 #define __KVM_HAVE_CREATE_DEVICE
 
+static inline void kvm_do_schedule(struct kvm_vcpu *vcpu)
+{
+	schedule();
+}
+
 #endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 16bd5d1..db09a56 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -266,4 +266,9 @@ struct kvm_arch{
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
+static inline void kvm_do_schedule(struct kvm_vcpu *vcpu)
+{
+	schedule();
+}
+
 #endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 95702de..72ff791 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1042,5 +1042,5 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
-
+void kvm_do_schedule(struct kvm_vcpu *vcpu);
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b963c86..84a4eb2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7281,6 +7281,14 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 		kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_do_schedule(struct kvm_vcpu *vcpu)
+{
+	/* We try to yield to a kicked vcpu else do a schedule */
+	if (kvm_vcpu_on_spin(vcpu) <= 0)
+		schedule();
+}
+EXPORT_SYMBOL_GPL(kvm_do_schedule);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f0eea07..39efc18 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -565,7 +565,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 302681c..8387247 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1685,7 +1685,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		if (signal_pending(current))
 			break;
 
-		schedule();
+		kvm_do_schedule(vcpu);
 	}
 
 	finish_wait(&vcpu->wq, &wait);
@@ -1786,7 +1786,7 @@ bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 }
 #endif
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *me)
+bool kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
@@ -1835,6 +1835,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 
 	/* Ensure vcpu is not eligible during next spinloop */
 	kvm_vcpu_set_dy_eligible(me, false);
+
+	return yielded;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
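Taken together, the interface change is that kvm_vcpu_on_spin() now reports whether it managed to yield to another vcpu, and kvm_do_schedule() falls back to a plain schedule() only when it did not. The following stand-alone model illustrates that control flow; all model_* names are invented for this illustration and are not the kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kvm_vcpu_on_spin(): true when a directed yield succeeded. */
static bool model_vcpu_on_spin(bool target_available)
{
	return target_available;
}

/* Stand-in for schedule(). */
static void model_schedule(void)
{
	printf("plain schedule()\n");
}

/* Mirrors the shape of kvm_do_schedule(): try a directed yield to a kicked
 * vcpu first, and only schedule() if no suitable target was found. */
static void model_do_schedule(bool target_available)
{
	if (!model_vcpu_on_spin(target_available))
		model_schedule();
	else
		printf("directed yield\n");
}

int main(void)
{
	model_do_schedule(true);	/* a boosted vcpu exists -> directed yield */
	model_do_schedule(false);	/* nothing to yield to -> plain schedule() */
	return 0;
}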