
[2/5] KVM: add KVM request variants without barrier

Message ID: 20170216160449.13094-3-rkrcmar@redhat.com (mailing list archive)
State: New, archived

Commit Message

Radim Krčmář Feb. 16, 2017, 4:04 p.m. UTC
The leading underscores mean that the call is just a bitop wrapper, with no
implied memory barrier.

Switch all users of open-coded set/clear/test on vcpu->requests to the
__kvm_request_* ones.  Automated by the coccinelle script below (a sketch of
the resulting helper layering follows the diffstat):
  @@
  expression VCPU, REQ;
  @@
  -set_bit(REQ, &VCPU->requests)
  +__kvm_request_set(REQ, VCPU)

  @@
  expression VCPU, REQ;
  @@
  -clear_bit(REQ, &VCPU->requests)
  +__kvm_request_clear(REQ, VCPU)

  @@
  expression VCPU, REQ;
  @@
  -test_bit(REQ, &VCPU->requests)
  +__kvm_request_test(REQ, VCPU)

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 arch/mips/kvm/emulate.c           |  2 +-
 arch/powerpc/kvm/book3s_pr.c      |  2 +-
 arch/powerpc/kvm/book3s_pr_papr.c |  2 +-
 arch/powerpc/kvm/booke.c          |  4 ++--
 arch/powerpc/kvm/powerpc.c        |  2 +-
 arch/s390/kvm/kvm-s390.c          |  2 +-
 arch/x86/kvm/vmx.c                |  2 +-
 arch/x86/kvm/x86.c                | 14 +++++++-------
 include/linux/kvm_host.h          | 22 +++++++++++++++++++---
 9 files changed, 34 insertions(+), 18 deletions(-)
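
The new helpers layer a barrier-less bitop wrapper under the existing barrier
variants.  A stand-alone user-space sketch of that layering (mock struct and
function names, plain C stand-ins for set_bit/clear_bit/test_bit and smp_wmb;
not code from the patch itself):

  #include <stdbool.h>
  #include <stdio.h>

  struct mock_vcpu { unsigned long requests; };

  /* __-prefixed variants: plain bitops, no implied memory barrier. */
  static inline void __mock_request_set(unsigned req, struct mock_vcpu *vcpu)
  {
          vcpu->requests |= 1UL << req;           /* stands in for set_bit() */
  }

  static inline bool __mock_request_test(unsigned req, struct mock_vcpu *vcpu)
  {
          return vcpu->requests & (1UL << req);   /* stands in for test_bit() */
  }

  static inline void __mock_request_clear(unsigned req, struct mock_vcpu *vcpu)
  {
          vcpu->requests &= ~(1UL << req);        /* stands in for clear_bit() */
  }

  /* Barrier variant: publish the request payload before setting the bit. */
  static inline void mock_request_set(unsigned req, struct mock_vcpu *vcpu)
  {
          __atomic_thread_fence(__ATOMIC_SEQ_CST); /* stands in for smp_wmb() */
          __mock_request_set(req, vcpu);
  }

  int main(void)
  {
          struct mock_vcpu vcpu = { 0 };

          mock_request_set(3, &vcpu);             /* 3 plays a KVM_REQ_* number */
          printf("pending: %d\n", __mock_request_test(3, &vcpu));
          __mock_request_clear(3, &vcpu);
          printf("pending: %d\n", __mock_request_test(3, &vcpu));
          return 0;
  }

Call sites that only need the raw bit operation (e.g. dropping a stale
KVM_REQ_UNHALT after kvm_vcpu_block()) use the __ variants, while
kvm_request_set() keeps the smp_wmb() so the request payload is visible
before the request bit appears.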

Comments

Paolo Bonzini Feb. 23, 2017, 10:57 a.m. UTC | #1
On 16/02/2017 17:04, Radim Krčmář wrote:
> +
> +static inline void __kvm_request_clear(unsigned req, struct kvm_vcpu *vcpu)
> +{
> +	test_bit(req, &vcpu->requests);
>  }

Are you sure? :)

Paolo

>  static inline bool kvm_request_test_and_clear(unsigned req, struct kvm_vcpu *vcpu)
>  {
> -	if (test_bit(req, &vcpu->requests)) {
> -		clear_bit(req, &vcpu->requests);
> +	if (__kvm_request_test(req, vcpu)) {
> +		__kvm_request_clear(req, vcpu);
>  
>  		/*
>  		 * Ensure the rest of the request is visible to
> --
Radim Krčmář Feb. 23, 2017, 3:50 p.m. UTC | #2
2017-02-23 11:57+0100, Paolo Bonzini:
> On 16/02/2017 17:04, Radim Krčmář wrote:
>> +
>> +static inline void __kvm_request_clear(unsigned req, struct kvm_vcpu *vcpu)
>> +{
>> +	test_bit(req, &vcpu->requests);
>>  }
> 
> Are you sure? :)

No, as always. :)
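
(For reference, the hunk Paolo quotes makes __kvm_request_clear() call
test_bit() instead of clear_bit().  The presumably intended body, matching
the other wrappers, would be the following; no corrected version is posted
in this thread:)

  static inline void __kvm_request_clear(unsigned req, struct kvm_vcpu *vcpu)
  {
          clear_bit(req, &vcpu->requests);
  }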

Patch

diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index ee4af898bcf6..552ae2b5e911 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -865,7 +865,7 @@  enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		 * check if any I/O interrupts are pending.
 		 */
 		if (kvm_request_test_and_clear(KVM_REQ_UNHALT, vcpu)) {
-			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		}
 	}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7af5154e848b..f5894d27a8a9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -349,7 +349,7 @@  static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 	if (msr & MSR_POW) {
 		if (!vcpu->arch.pending_exceptions) {
 			kvm_vcpu_block(vcpu);
-			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 			vcpu->stat.halt_wakeup++;
 
 			/* Unset POW bit after we woke up */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index f102616febc7..86847220811a 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -344,7 +344,7 @@  int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
 	case H_CEDE:
 		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
 		kvm_vcpu_block(vcpu);
-		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 		vcpu->stat.halt_wakeup++;
 		return EMULATE_DONE;
 	case H_LOGICAL_CI_LOAD:
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 806caaf60e10..e9098af0ab2a 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -579,7 +579,7 @@  static void arm_next_watchdog(struct kvm_vcpu *vcpu)
 	 * userspace, so clear the KVM_REQ_WATCHDOG request.
 	 */
 	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
-		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_WATCHDOG, vcpu);
 
 	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
 	nr_jiffies = watchdog_next_timeout(vcpu);
@@ -690,7 +690,7 @@  int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		local_irq_enable();
 		kvm_vcpu_block(vcpu);
-		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 		hard_irq_disable();
 
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 19e89419a843..f570ca9cd8c6 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -231,7 +231,7 @@  int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	case EV_HCALL_TOKEN(EV_IDLE):
 		r = EV_SUCCESS;
 		kvm_vcpu_block(vcpu);
-		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 		break;
 	default:
 		r = EV_UNIMPLEMENTED;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index db3c742a5dc9..0fa77e6fdfaf 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2443,7 +2443,7 @@  static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	}
 
 	/* nothing to do, just clear the request */
-	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+	__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 
 	return 0;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b183b4ac3ea5..7ab638f2189b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6352,7 +6352,7 @@  static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 		if (intr_window_requested && vmx_interrupt_allowed(vcpu))
 			return handle_interrupt_window(&vmx->vcpu);
 
-		if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
+		if (__kvm_request_test(KVM_REQ_EVENT, vcpu))
 			return 1;
 
 		err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index da125323682a..6e8c62c29a5a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1767,7 +1767,7 @@  static void kvm_gen_update_masterclock(struct kvm *kvm)
 
 	/* guest entries allowed */
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
 
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
@@ -2228,8 +2228,8 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
 
 			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
-				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
-						&vcpu->requests);
+				__kvm_request_set(KVM_REQ_MASTERCLOCK_UPDATE,
+						  vcpu);
 
 			ka->boot_vcpu_runs_old_kvmclock = tmp;
 		}
@@ -2816,7 +2816,7 @@  static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 
 static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
-	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
+	__kvm_request_set(KVM_REQ_MIGRATE_TIMER, vcpu);
 }
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -7048,7 +7048,7 @@  static int vcpu_run(struct kvm_vcpu *vcpu)
 		if (r <= 0)
 			break;
 
-		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_PENDING_TIMER, vcpu);
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);
 
@@ -7176,7 +7176,7 @@  int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
 		kvm_apic_accept_events(vcpu);
-		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+		__kvm_request_clear(KVM_REQ_UNHALT, vcpu);
 		r = -EAGAIN;
 		goto out;
 	}
@@ -8381,7 +8381,7 @@  static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	if (atomic_read(&vcpu->arch.nmi_queued))
 		return true;
 
-	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+	if (__kvm_request_test(KVM_REQ_SMI, vcpu))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 21f91de3098b..d899473859d3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1100,6 +1100,12 @@  static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
  * TODO: the API is inconsistent -- a request doesn't call kvm_vcpu_kick(), but
  * forces smp_wmb() for all requests.
  */
+
+static inline void __kvm_request_set(unsigned req, struct kvm_vcpu *vcpu)
+{
+	set_bit(req, &vcpu->requests);
+}
+
 static inline void kvm_request_set(unsigned req, struct kvm_vcpu *vcpu)
 {
 	/*
@@ -1108,13 +1114,23 @@  static inline void kvm_request_set(unsigned req, struct kvm_vcpu *vcpu)
 	 * Paired with the smp_mb__after_atomic in kvm_request_test_and_clear.
 	 */
 	smp_wmb();
-	set_bit(req, &vcpu->requests);
+	__kvm_request_set(req, vcpu);
+}
+
+static inline bool __kvm_request_test(unsigned req, struct kvm_vcpu *vcpu)
+{
+	return test_bit(req, &vcpu->requests);
+}
+
+static inline void __kvm_request_clear(unsigned req, struct kvm_vcpu *vcpu)
+{
+	test_bit(req, &vcpu->requests);
 }
 
 static inline bool kvm_request_test_and_clear(unsigned req, struct kvm_vcpu *vcpu)
 {
-	if (test_bit(req, &vcpu->requests)) {
-		clear_bit(req, &vcpu->requests);
+	if (__kvm_request_test(req, vcpu)) {
+		__kvm_request_clear(req, vcpu);
 
 		/*
 		 * Ensure the rest of the request is visible to