@@ -5903,7 +5903,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
goto out;
}
- if (signal_pending(current))
+ if (kvm_need_exit(vcpu))
goto out;
if (need_resched())
schedule();
@@ -6547,6 +6547,11 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
++vcpu->stat.signal_exits;
break;
}
+ if (unlikely(kvm_has_request(KVM_REQ_EXIT, vcpu))) {
+ r = 0;
+ vcpu->run->exit_reason = KVM_EXIT_REQUEST;
+ break;
+ }
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
cond_resched();
@@ -6683,6 +6688,7 @@ out:
post_kvm_run_save(vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ clear_bit(KVM_REQ_EXIT, &vcpu->requests);
return r;
}
@@ -121,7 +121,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_UNHALT 6
#define KVM_REQ_MMU_SYNC 7
#define KVM_REQ_CLOCK_UPDATE 8
-#define KVM_REQ_KICK 9
+#define KVM_REQ_EXIT 9
#define KVM_REQ_DEACTIVATE_FPU 10
#define KVM_REQ_EVENT 11
#define KVM_REQ_APF_HALT 12
@@ -1104,6 +1104,12 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
}
+static inline bool kvm_need_exit(struct kvm_vcpu *vcpu)
+{
+ return signal_pending(current) ||
+ kvm_has_request(KVM_REQ_EXIT, vcpu);
+}
+
extern bool kvm_rebooting;
struct kvm_device {
@@ -184,6 +184,7 @@ struct kvm_s390_skeys {
#define KVM_EXIT_SYSTEM_EVENT 24
#define KVM_EXIT_S390_STSI 25
#define KVM_EXIT_IOAPIC_EOI 26
+#define KVM_EXIT_REQUEST 27
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -1914,7 +1914,7 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
}
if (kvm_cpu_has_pending_timer(vcpu))
return -EINTR;
- if (signal_pending(current))
+ if (kvm_need_exit(vcpu))
return -EINTR;
return 0;
When userspace wants KVM to exit to userspace, it sends a signal. This has a disadvantage of requiring a change to the signal mask because the signal needs to be blocked in userspace to stay pending when sending to self. Using a request flag allows us to shave 200-300 cycles from every userspace exit and the speedup grows with NUMA because unblocking touches shared spinlock. The disadvantage is that it adds an overhead of one bit check for all kernel exits. A quick tracing shows that the ratio of userspace exits after boot is about 1/5 and a subsequent run of nmap and kernel compile has about 1/60, so the check should not regress global performance. All signal_pending() calls are userspace exit requests, so we add a check for KVM_REQ_EXIT there. There is one omitted call in kvm_vcpu_run because KVM_REQ_EXIT is implied in earlier check for requests. Signed-off-by: Radim Krčmář <rkrcmar@redhat.com> --- arch/x86/kvm/vmx.c | 2 +- arch/x86/kvm/x86.c | 6 ++++++ include/linux/kvm_host.h | 8 +++++++- include/uapi/linux/kvm.h | 1 + virt/kvm/kvm_main.c | 2 +- 5 files changed, 16 insertions(+), 3 deletions(-)