@@ -10375,6 +10375,33 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
	return r;
}

+static bool kvm_vcpu_non_preemptible(struct kvm_vcpu *vcpu)
+{
+	int count;
+
+	if (!vcpu->arch.pv_pc.preempt_count_enabled)
+		return false;
+
+	/*
+	 * A non-zero count with the need-resched bit masked off means
+	 * the guest is inside a preempt-disabled critical section.
+	 */
+	if (!kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_pc.preempt_count_cache,
+				   &count, sizeof(count)))
+		return count & ~PREEMPT_NEED_RESCHED;
+
+	return false;
+}
+
+/*
+ * A vCPU that cannot be preempted is likely inside a critical section,
+ * e.g. holding a spinlock, so prefer it as a directed-yield target.
+ */
+bool kvm_arch_boost_candidate(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.irq_disabled || kvm_vcpu_non_preemptible(vcpu);
+}
+
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
int r;
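The helpers above dereference vcpu->arch.pv_pc, whose definition is not
part of this excerpt. A minimal sketch of the state they assume, for
illustration only (the struct name and its placement in struct
kvm_vcpu_arch are hypothetical; only the two field names come from the
code above, and gfn_to_hva_cache is the type kvm_read_guest_cached()
expects):

	/* Hypothetical per-vCPU paravirt preempt-count state. */
	struct kvm_vcpu_pv_pc {
		/* set once the guest registers its preempt-count area */
		bool preempt_count_enabled;
		/* guest preempt counter mapping for kvm_read_guest_cached() */
		struct gfn_to_hva_cache preempt_count_cache;
	};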
@@ -1428,6 +1428,7 @@ bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
int kvm_arch_create_vm_debugfs(struct kvm *kvm);
+bool kvm_arch_boost_candidate(struct kvm_vcpu *vcpu);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
@@ -3545,6 +3545,11 @@ bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
	return false;
}

+bool __weak kvm_arch_boost_candidate(struct kvm_vcpu *vcpu)
+{
+	return true;
+}
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
@@ -3580,6 +3585,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
			    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
			    !kvm_arch_vcpu_in_kernel(vcpu))
				continue;
+			if (!kvm_arch_boost_candidate(vcpu))
+				continue;
			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
				continue;
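For reference, masking with ~PREEMPT_NEED_RESCHED mirrors how x86
defines preempt_count(): the need-resched hint lives in bit 31 of the
per-CPU counter (PREEMPT_NEED_RESCHED is 0x80000000 in
arch/x86/include/asm/preempt.h) and must be stripped before the nesting
depth is inspected. A stand-alone sketch of that test, using made-up
sample counts:

	#include <stdio.h>

	#define PREEMPT_NEED_RESCHED 0x80000000U /* bit 31, as on x86 */

	/* Non-zero iff the sampled count shows a preempt-disabled section. */
	static unsigned int non_preemptible(unsigned int count)
	{
		return count & ~PREEMPT_NEED_RESCHED;
	}

	int main(void)
	{
		printf("%u\n", non_preemptible(0x00000000));           /* 0: preemptible */
		printf("%u\n", non_preemptible(PREEMPT_NEED_RESCHED)); /* 0: flag alone */
		printf("%u\n", non_preemptible(0x00000001));           /* 1: preempt_disable() */
		return 0;
	}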