@@ -484,6 +484,17 @@ struct kvm_vcpu_arch {
u64 length;
u64 status;
} osvw;
+
+ /*
+ * Pause loop exit optimization
+ * pause_loop_exited: set when the vcpu takes a pause loop exit.
+ * dy_eligible: indicates whether the vcpu is eligible for directed yield.
+ */
+ struct {
+ bool pause_loop_exited;
+ bool dy_eligible;
+ } ple;
+
};
struct kvm_lpage_info {
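The two flags added above carry per-vcpu PLE state out of the exit handlers: pause_loop_exited records that the vcpu's most recent exit was a pause loop exit (it is cleared again on every guest entry, see the vcpu_enter_guest hunk below), and dy_eligible throttles how often such a vcpu may itself be picked as a directed-yield target. A minimal sketch of an eligibility helper that could consume these flags follows; the helper name and the alternating heuristic are illustrative assumptions, not part of this hunk:

/*
 * Sketch only: decide whether @vcpu is a reasonable directed-yield
 * target, and alternate dy_eligible so a vcpu that keeps pause-loop
 * exiting is not boosted every single time it is considered.
 */
static bool vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	bool eligible;

	/* A vcpu that did not pause-loop exit is always a fine target. */
	eligible = !vcpu->arch.ple.pause_loop_exited ||
		   vcpu->arch.ple.dy_eligible;

	if (vcpu->arch.ple.pause_loop_exited)
		vcpu->arch.ple.dy_eligible = !vcpu->arch.ple.dy_eligible;

	return eligible;
}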
@@ -3264,6 +3264,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
static int pause_interception(struct vcpu_svm *svm)
{
+ svm->vcpu.arch.ple.pause_loop_exited = true;
kvm_vcpu_on_spin(&(svm->vcpu));
return 1;
}
@@ -4945,6 +4945,7 @@ out:
static int handle_pause(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
+ vcpu->arch.ple.pause_loop_exited = true;
kvm_vcpu_on_spin(vcpu);
return 1;
}
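Both the SVM and the VMX handler set the flag just before calling kvm_vcpu_on_spin(), which walks the sibling vcpus looking for someone to yield to. Roughly how that candidate loop could honour the new state is sketched below; the wait-queue test and kvm_vcpu_yield_to() follow the generic KVM code of this era, and hooking the arch-specific ple flags into the generic loop would in practice need an arch callback, which this sketch glosses over:

	/* Simplified candidate scan inside kvm_vcpu_on_spin(me). */
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		if (waitqueue_active(&vcpu->wq))
			continue;	/* halted, not spinning on a lock */
		if (!vcpu_eligible_for_directed_yield(vcpu))
			continue;	/* busy pause-looping itself */
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;		/* timeslice donated, stop scanning */
	}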
@@ -5331,7 +5331,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
-
+ vcpu->arch.ple.pause_loop_exited = false;
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6168,6 +6168,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
+ vcpu->arch.ple.pause_loop_exited = false;
+ vcpu->arch.ple.dy_eligible = false;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;