
[v2,1/5] KVM: rename API for requests to match bit operations

Message ID 20170224195002.28030-2-rkrcmar@redhat.com (mailing list archive)
State New, archived

Commit Message

Radim Krčmář Feb. 24, 2017, 7:49 p.m. UTC
kvm_make_request was a wrapper that added a barrier to set_bit, and
kvm_check_request did the same for test_bit and clear_bit, but the names
were not very obvious.

The renaming:
  kvm_request_set            <- kvm_make_request
  kvm_request_test_and_clear <- kvm_check_request

Automated with coccinelle script:
  @@
  expression VCPU, REQ;
  @@
  -kvm_make_request(REQ, VCPU)
  +kvm_request_set(REQ, VCPU)

  @@
  expression VCPU, REQ;
  @@
  -kvm_check_request(REQ, VCPU)
  +kvm_request_test_and_clear(REQ, VCPU)
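
The renamed helpers keep the existing barrier pairing; a condensed sketch of
their semantics is below (the authoritative definitions are in the
include/linux/kvm_host.h hunk of this patch):

  static inline void kvm_request_set(unsigned req, struct kvm_vcpu *vcpu)
  {
  	/* Publish data written for the request before setting the bit;
  	 * paired with smp_mb__after_atomic() in kvm_request_test_and_clear(). */
  	smp_wmb();
  	set_bit(req, &vcpu->requests);
  }

  static inline bool kvm_request_test_and_clear(unsigned req, struct kvm_vcpu *vcpu)
  {
  	if (test_bit(req, &vcpu->requests)) {
  		clear_bit(req, &vcpu->requests);
  		/* Make data written before kvm_request_set() visible. */
  		smp_mb__after_atomic();
  		return true;
  	}
  	return false;
  }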

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
---
 v2:
 * added sob [Cornelia]
 * tweaked TODO [David]
---
 arch/mips/kvm/emulate.c      |   2 +-
 arch/mips/kvm/trap_emul.c    |   2 +-
 arch/powerpc/kvm/book3s_pr.c |   2 +-
 arch/powerpc/kvm/booke.c     |  16 +++---
 arch/powerpc/kvm/powerpc.c   |   2 +-
 arch/s390/kvm/kvm-s390.c     |  22 ++++----
 arch/s390/kvm/kvm-s390.h     |   4 +-
 arch/s390/kvm/priv.c         |   4 +-
 arch/x86/kvm/hyperv.c        |  14 ++---
 arch/x86/kvm/i8259.c         |   2 +-
 arch/x86/kvm/lapic.c         |  22 ++++----
 arch/x86/kvm/mmu.c           |  14 ++---
 arch/x86/kvm/pmu.c           |   6 +--
 arch/x86/kvm/svm.c           |  12 ++---
 arch/x86/kvm/vmx.c           |  30 +++++------
 arch/x86/kvm/x86.c           | 126 +++++++++++++++++++++----------------------
 include/linux/kvm_host.h     |  33 +++++++++---
 virt/kvm/kvm_main.c          |   4 +-
 18 files changed, 169 insertions(+), 148 deletions(-)

Comments

David Hildenbrand Feb. 27, 2017, 9:54 a.m. UTC | #1
Am 24.02.2017 um 20:49 schrieb Radim Krčmář:
> kvm_make_request was a wrapper that added a barrier to set_bit, and
> kvm_check_request did the same for test_bit and clear_bit, but the names
> were not very obvious.
> 
> The renaming:
>   kvm_request_set            <- kvm_make_request
>   kvm_request_test_and_clear <- kvm_check_request
> 

What about kvm_req_set() / kvm_req_test_and_clear()?

Matches the request bit definitions (KVM_REQ_*) and saves a couple of
characters.

Anyhow

Reviewed-by: David Hildenbrand <david@redhat.com>
Radim Krčmář March 1, 2017, 4:37 p.m. UTC | #2
2017-02-27 10:54+0100, David Hildenbrand:
> Am 24.02.2017 um 20:49 schrieb Radim Krčmář:
>> kvm_make_request was a wrapper that added a barrier to set_bit, and
>> kvm_check_request did the same for test_bit and clear_bit, but the names
>> were not very obvious.
>> 
>> The renaming:
>>   kvm_request_set            <- kvm_make_request
>>   kvm_request_test_and_clear <- kvm_check_request
>> 
> 
> What about kvm_req_set() / kvm_req_test_and_clear()?
> Matches the request bit definitions (KVM_REQ_*) and saves a couple of
> characters.

Good point.  I prefer to avoid abbreviations, but can be easily
convinced on this.

> Anyhow
> 
> Reviewed-by: David Hildenbrand <david@redhat.com>

I'll do nothing for now as it would just spin another round of reviews.

Thanks.
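
For quick orientation before reading the full diff: call sites change only in
the helper name. For example, the TLB-flush handling in vcpu_enter_guest()
(taken from the arch/x86/kvm/x86.c hunk below) becomes:

  -		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
  +		if (kvm_request_test_and_clear(KVM_REQ_TLB_FLUSH, vcpu))
  			kvm_vcpu_flush_tlb(vcpu);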

Patch

diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d40cfaad4529..ee4af898bcf6 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -864,7 +864,7 @@  enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
 		 * We we are runnable, then definitely go off to user space to
 		 * check if any I/O interrupts are pending.
 		 */
-		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_UNHALT, vcpu)) {
 			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		}
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index b1fa53b252ea..35068823cde6 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1032,7 +1032,7 @@  static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
 	if (likely(!vcpu->requests))
 		return;
 
-	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_TLB_FLUSH, vcpu)) {
 		/*
 		 * Both kernel & user GVA mappings must be invalidated. The
 		 * caller is just about to check whether the ASID is stale
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d4dfc0ca2a44..7af5154e848b 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -241,7 +241,7 @@  static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
 
 	/* We misuse TLB_FLUSH to indicate that we want to clear
 	   all shadow cache entries */
-	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+	if (kvm_request_test_and_clear(KVM_REQ_TLB_FLUSH, vcpu))
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
 	return r;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0514cbd4e533..806caaf60e10 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -503,7 +503,7 @@  static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
 		if (update_epr == true) {
 			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
-				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
+				kvm_request_set(KVM_REQ_EPR_EXIT, vcpu);
 			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
 				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
 				kvmppc_mpic_set_epr(vcpu);
@@ -617,7 +617,7 @@  void kvmppc_watchdog_func(unsigned long data)
 
 	if (new_tsr & TSR_WIS) {
 		smp_wmb();
-		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+		kvm_request_set(KVM_REQ_PENDING_TIMER, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
 
@@ -628,7 +628,7 @@  void kvmppc_watchdog_func(unsigned long data)
 	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
 	    vcpu->arch.watchdog_enabled) {
 		smp_wmb();
-		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
+		kvm_request_set(KVM_REQ_WATCHDOG, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
 
@@ -704,19 +704,19 @@  int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
 	int r = 1; /* Indicate we want to get back into the guest */
 
-	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
+	if (kvm_request_test_and_clear(KVM_REQ_PENDING_TIMER, vcpu))
 		update_timer_ints(vcpu);
 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
-	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+	if (kvm_request_test_and_clear(KVM_REQ_TLB_FLUSH, vcpu))
 		kvmppc_core_flush_tlb(vcpu);
 #endif
 
-	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_WATCHDOG, vcpu)) {
 		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
 		r = 0;
 	}
 
-	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_EPR_EXIT, vcpu)) {
 		vcpu->run->epr.epr = 0;
 		vcpu->arch.epr_needed = true;
 		vcpu->run->exit_reason = KVM_EXIT_EPR;
@@ -1830,7 +1830,7 @@  void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
 {
 	set_bits(tsr_bits, &vcpu->arch.tsr);
 	smp_wmb();
-	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+	kvm_request_set(KVM_REQ_PENDING_TIMER, vcpu);
 	kvm_vcpu_kick(vcpu);
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2b38d824e9e5..03f563000160 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -190,7 +190,7 @@  int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 			 * NX. If that's the case, remove !PR NX capability.
 			 */
 			vcpu->arch.disable_kernel_nx = true;
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 		}
 
 		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f5694838234d..8fb210974c42 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2098,7 +2098,7 @@  void exit_sie(struct kvm_vcpu *vcpu)
 /* Kick a guest cpu out of SIE to process a request synchronously */
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-	kvm_make_request(req, vcpu);
+	kvm_request_set(req, vcpu);
 	kvm_s390_vcpu_request(vcpu);
 }
 
@@ -2403,24 +2403,24 @@  static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	 * already finished. We might race against a second unmapper that
 	 * wants to set the blocking bit. Lets just retry the request loop.
 	 */
-	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_MMU_RELOAD, vcpu)) {
 		int rc;
 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
 					  kvm_s390_get_prefix(vcpu),
 					  PAGE_SIZE * 2, PROT_WRITE);
 		if (rc) {
-			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+			kvm_request_set(KVM_REQ_MMU_RELOAD, vcpu);
 			return rc;
 		}
 		goto retry;
 	}
 
-	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_TLB_FLUSH, vcpu)) {
 		vcpu->arch.sie_block->ihcpu = 0xffff;
 		goto retry;
 	}
 
-	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
 			atomic_or(CPUSTAT_IBS,
@@ -2429,7 +2429,7 @@  static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
-	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
 			atomic_andnot(CPUSTAT_IBS,
@@ -2438,7 +2438,7 @@  static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
-	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
+	if (kvm_request_test_and_clear(KVM_REQ_ICPT_OPEREXC, vcpu)) {
 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
 		goto retry;
 	}
@@ -2724,7 +2724,7 @@  static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
 		/* some control register changes require a tlb flush */
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 	}
 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
@@ -2924,7 +2924,7 @@  int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
-	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
+	kvm_request_test_and_clear(KVM_REQ_ENABLE_IBS, vcpu);
 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }
 
@@ -2942,7 +2942,7 @@  static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	if (!sclp.has_ibs)
 		return;
-	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
+	kvm_request_test_and_clear(KVM_REQ_DISABLE_IBS, vcpu);
 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }
 
@@ -2980,7 +2980,7 @@  void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
 	 */
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
 	return;
 }
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index af9fa91a0c91..ceb12cbcabaf 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -82,8 +82,8 @@  static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
 		   prefix);
 	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
+	kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
+	kvm_request_set(KVM_REQ_MMU_RELOAD, vcpu);
 }
 
 static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index fb4b494cde9b..ea2efa3cf1e9 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -998,7 +998,7 @@  int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
 			break;
 		reg = (reg + 1) % 16;
 	} while (1);
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 	return 0;
 }
 
@@ -1070,7 +1070,7 @@  static int handle_lctlg(struct kvm_vcpu *vcpu)
 			break;
 		reg = (reg + 1) % 16;
 	} while (1);
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 	return 0;
 }
 
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index f701d4430727..4ab702a9e67d 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -100,7 +100,7 @@  static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
 		__clear_bit(vector, synic->auto_eoi_bitmap);
 
 	/* Load SynIC vectors into EOI exit bitmap */
-	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
+	kvm_request_set(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
 	return 0;
 }
 
@@ -170,7 +170,7 @@  static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 		}
 	}
 	if (stimers_pending)
-		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
+		kvm_request_set(KVM_REQ_HV_STIMER, vcpu);
 
 	idx = srcu_read_lock(&kvm->irq_srcu);
 	gsi = atomic_read(&synic->sint_to_gsi[sint]);
@@ -190,7 +190,7 @@  static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
 	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
 
-	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
+	kvm_request_set(KVM_REQ_HV_EXIT, vcpu);
 }
 
 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
@@ -410,7 +410,7 @@  static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
 
 	set_bit(stimer->index,
 		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
-	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
+	kvm_request_set(KVM_REQ_HV_STIMER, vcpu);
 	if (vcpu_kick)
 		kvm_vcpu_kick(vcpu);
 }
@@ -752,7 +752,7 @@  static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
 			  hv->hv_crash_param[4]);
 
 		/* Send notification about crash to user space */
-		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+		kvm_request_set(KVM_REQ_HV_CRASH, vcpu);
 	}
 
 	return 0;
@@ -939,7 +939,7 @@  static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 	case HV_X64_MSR_REFERENCE_TSC:
 		hv->hv_tsc_page = data;
 		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
-			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+			kvm_request_set(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 		break;
 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
 		return kvm_hv_msr_set_crash_data(vcpu,
@@ -950,7 +950,7 @@  static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 	case HV_X64_MSR_RESET:
 		if (data == 1) {
 			vcpu_debug(vcpu, "hyper-v reset requested\n");
-			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
+			kvm_request_set(KVM_REQ_HV_RESET, vcpu);
 		}
 		break;
 	default:
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d4f119..b85225d36aae 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -67,7 +67,7 @@  static void pic_unlock(struct kvm_pic *s)
 		if (!found)
 			return;
 
-		kvm_make_request(KVM_REQ_EVENT, found);
+		kvm_request_set(KVM_REQ_EVENT, found);
 		kvm_vcpu_kick(found);
 	}
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bad6a25067bc..0297eea0d47b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -616,7 +616,7 @@  static void apic_update_ppr(struct kvm_lapic *apic)
 
 	if (__apic_update_ppr(apic, &ppr) &&
 	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
-		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+		kvm_request_set(KVM_REQ_EVENT, apic->vcpu);
 }
 
 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
@@ -978,7 +978,7 @@  static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		else {
 			kvm_lapic_set_irr(vector, apic);
 
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
+			kvm_request_set(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		}
 		break;
@@ -986,13 +986,13 @@  static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	case APIC_DM_REMRD:
 		result = 1;
 		vcpu->arch.pv.pv_unhalted = 1;
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 		kvm_vcpu_kick(vcpu);
 		break;
 
 	case APIC_DM_SMI:
 		result = 1;
-		kvm_make_request(KVM_REQ_SMI, vcpu);
+		kvm_request_set(KVM_REQ_SMI, vcpu);
 		kvm_vcpu_kick(vcpu);
 		break;
 
@@ -1010,7 +1010,7 @@  static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			/* make sure pending_events is visible before sending
 			 * the request */
 			smp_wmb();
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
+			kvm_request_set(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		} else {
 			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
@@ -1026,7 +1026,7 @@  static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		/* make sure sipi_vector is visible for the receiver */
 		smp_wmb();
 		set_bit(KVM_APIC_SIPI, &apic->pending_events);
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 		kvm_vcpu_kick(vcpu);
 		break;
 
@@ -1067,7 +1067,7 @@  static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 	/* Request a KVM exit to inform the userspace IOAPIC. */
 	if (irqchip_split(apic->vcpu->kvm)) {
 		apic->vcpu->arch.pending_ioapic_eoi = vector;
-		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
+		kvm_request_set(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
 		return;
 	}
 
@@ -1099,7 +1099,7 @@  static int apic_set_eoi(struct kvm_lapic *apic)
 		kvm_hv_synic_send_eoi(apic->vcpu, vector);
 
 	kvm_ioapic_send_eoi(apic, vector);
-	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, apic->vcpu);
 	return vector;
 }
 
@@ -1114,7 +1114,7 @@  void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
 	trace_kvm_eoi(apic, vector);
 
 	kvm_ioapic_send_eoi(apic, vector);
-	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, apic->vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
 
@@ -1179,7 +1179,7 @@  static void __report_tpr_access(struct kvm_lapic *apic, bool write)
 	struct kvm_vcpu *vcpu = apic->vcpu;
 	struct kvm_run *run = vcpu->run;
 
-	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
+	kvm_request_set(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
 	run->tpr_access.rip = kvm_rip_read(vcpu);
 	run->tpr_access.is_write = write;
 }
@@ -2217,7 +2217,7 @@  int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 		kvm_x86_ops->hwapic_isr_update(vcpu,
 				apic_find_highest_isr(apic));
 	}
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))
 		kvm_rtc_eoi_tracking_restore_one(vcpu);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2fd7586aad4d..a588fce0be85 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2075,7 +2075,7 @@  static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 	if (remote_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	else if (local_flush)
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 #ifdef CONFIG_KVM_MMU_AUDIT
@@ -2281,11 +2281,11 @@  static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 				break;
 
 			WARN_ON(!list_empty(&invalid_list));
-			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 		}
 
 		if (sp->unsync_children)
-			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+			kvm_request_set(KVM_REQ_MMU_SYNC, vcpu);
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
@@ -2769,7 +2769,7 @@  static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 	      true, host_writable)) {
 		if (write_fault)
 			emulate = true;
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 	}
 
 	if (unlikely(is_mmio_spte(*sptep)))
@@ -3303,7 +3303,7 @@  static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 	int ret = 0;
 
 	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+		kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 		ret = 1;
 	}
 
@@ -3707,7 +3707,7 @@  static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
-			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+			kvm_request_set(KVM_REQ_APF_HALT, vcpu);
 			return true;
 		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
 			return true;
@@ -4765,7 +4765,7 @@  EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	vcpu->arch.mmu.invlpg(vcpu, gva);
-	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 	++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 06ce377dcbc9..54fd50ad61c5 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -65,7 +65,7 @@  static void kvm_perf_overflow(struct perf_event *perf_event,
 	if (!test_and_set_bit(pmc->idx,
 			      (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+		kvm_request_set(KVM_REQ_PMU, pmc->vcpu);
 	}
 }
 
@@ -79,7 +79,7 @@  static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 	if (!test_and_set_bit(pmc->idx,
 			      (unsigned long *)&pmu->reprogram_pmi)) {
 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+		kvm_request_set(KVM_REQ_PMU, pmc->vcpu);
 
 		/*
 		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
@@ -92,7 +92,7 @@  static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 		if (!kvm_is_in_guest())
 			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
 		else
-			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+			kvm_request_set(KVM_REQ_PMI, pmc->vcpu);
 	}
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c62b3f..57ea99d0ec30 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2200,7 +2200,7 @@  static void svm_handle_mce(struct vcpu_svm *svm)
 		 */
 		pr_err("KVM: Guest triggered AMD Erratum 383\n");
 
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
+		kvm_request_set(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
 
 		return;
 	}
@@ -3072,7 +3072,7 @@  static int stgi_interception(struct vcpu_svm *svm)
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, &svm->vcpu);
 
 	enable_gif(svm);
 
@@ -3220,7 +3220,7 @@  static int iret_interception(struct vcpu_svm *svm)
 	clr_intercept(svm, INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
 	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, &svm->vcpu);
 	return 1;
 }
 
@@ -3659,7 +3659,7 @@  static int msr_interception(struct vcpu_svm *svm)
 
 static int interrupt_window_interception(struct vcpu_svm *svm)
 {
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	mark_dirty(svm->vmcb, VMCB_INTR);
@@ -4693,7 +4693,7 @@  static void svm_complete_interrupts(struct vcpu_svm *svm)
 	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
 	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
 		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
-		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+		kvm_request_set(KVM_REQ_EVENT, &svm->vcpu);
 	}
 
 	svm->vcpu.arch.nmi_injected = false;
@@ -4703,7 +4703,7 @@  static void svm_complete_interrupts(struct vcpu_svm *svm)
 	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
 		return;
 
-	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, &svm->vcpu);
 
 	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
 	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ef4ba71dbb66..398d2d5f6d5c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2270,7 +2270,7 @@  static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
 		unsigned long sysenter_esp;
 
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 
 		/*
 		 * Linux uses per-cpu TSS and GDT, so set these when switching
@@ -2459,7 +2459,7 @@  static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 		if (kvm_exception_is_soft(nr))
 			inc_eip = vcpu->arch.event_exit_inst_len;
 		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
-			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+			kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 		return;
 	}
 
@@ -4967,7 +4967,7 @@  static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
 		 * we will accomplish it in the next vmentry.
 		 */
 		vmx->nested.pi_pending = true;
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 		return 0;
 	}
 	return -1;
@@ -5354,7 +5354,7 @@  static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+	kvm_request_set(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 	if (kvm_vcpu_apicv_active(vcpu))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -5431,7 +5431,7 @@  static void vmx_inject_irq(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.interrupt.soft)
 			inc_eip = vcpu->arch.event_exit_inst_len;
 		if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
-			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+			kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 		return;
 	}
 	intr = irq | INTR_INFO_VALID_MASK;
@@ -5468,7 +5468,7 @@  static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 
 	if (vmx->rmode.vm86_active) {
 		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
-			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+			kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 		return;
 	}
 
@@ -6055,7 +6055,7 @@  static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 			CPU_BASED_VIRTUAL_INTR_PENDING);
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	++vcpu->stat.irq_window_exits;
 	return 1;
@@ -6322,7 +6322,7 @@  static int handle_nmi_window(struct kvm_vcpu *vcpu)
 	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 			CPU_BASED_VIRTUAL_NMI_PENDING);
 	++vcpu->stat.nmi_window_exits;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	return 1;
 }
@@ -6829,7 +6829,7 @@  static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
 {
 	/* TODO: not to reset guest simply here. */
-	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+	kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
 }
 
@@ -6839,7 +6839,7 @@  static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 		container_of(timer, struct vcpu_vmx, nested.preemption_timer);
 
 	vmx->nested.preemption_timer_expired = true;
-	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
+	kvm_request_set(KVM_REQ_EVENT, &vmx->vcpu);
 	kvm_vcpu_kick(&vmx->vcpu);
 
 	return HRTIMER_NORESTART;
@@ -7281,7 +7281,7 @@  static int handle_vmclear(struct kvm_vcpu *vcpu)
 		 * resulted in this case, so let's shut down before doing any
 		 * more damage:
 		 */
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+		kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 		return 1;
 	}
 	vmcs12 = kmap(page);
@@ -7730,7 +7730,7 @@  static int handle_invept(struct kvm_vcpu *vcpu)
 	 */
 	case VMX_EPT_EXTENT_CONTEXT:
 		kvm_mmu_sync_roots(vcpu);
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 		nested_vmx_succeed(vcpu);
 		break;
 	default:
@@ -8834,7 +8834,7 @@  static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
 	if (!idtv_info_valid)
 		return;
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
@@ -9132,7 +9132,7 @@  static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * nested_run_pending, we need to re-enable this bit.
 	 */
 	if (vmx->nested.nested_run_pending)
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	vmx->nested.nested_run_pending = 0;
 
@@ -11094,7 +11094,7 @@  static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	 * We are now running in L2, mmu_notifier will force to reload the
 	 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
 	 */
-	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+	kvm_request_set(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 	/*
 	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b2a4b11274b0..3ceb1339ed86 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -388,7 +388,7 @@  static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 	u32 prev_nr;
 	int class1, class2;
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	if (!vcpu->arch.exception.pending) {
 	queue:
@@ -406,7 +406,7 @@  static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 	prev_nr = vcpu->arch.exception.nr;
 	if (prev_nr == DF_VECTOR) {
 		/* triple fault -> shutdown */
-		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+		kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 		return;
 	}
 	class1 = exception_class(prev_nr);
@@ -469,7 +469,7 @@  static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	atomic_inc(&vcpu->arch.nmi_queued);
-	kvm_make_request(KVM_REQ_NMI, vcpu);
+	kvm_request_set(KVM_REQ_NMI, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
 
@@ -805,7 +805,7 @@  int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		kvm_request_set(KVM_REQ_TLB_FLUSH, vcpu);
 		return 0;
 	}
 
@@ -1179,7 +1179,7 @@  void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
 	 * vcpu_enter_guest.  This function is only called from
 	 * the physical CPU that is running vcpu.
 	 */
-	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+	kvm_request_set(KVM_REQ_PENDING_TIMER, vcpu);
 }
 
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
@@ -1375,7 +1375,7 @@  static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 	 */
 	if (ka->use_master_clock ||
 	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
-		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 
 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
 			    atomic_read(&vcpu->kvm->online_vcpus),
@@ -1763,7 +1763,7 @@  static void kvm_gen_update_masterclock(struct kvm *kvm)
 	pvclock_update_vm_gtod_copy(kvm);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	/* guest entries allowed */
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -1890,7 +1890,7 @@  static int kvm_guest_time_update(struct kvm_vcpu *v)
 	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 	if (unlikely(tgt_tsc_khz == 0)) {
 		local_irq_restore(flags);
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+		kvm_request_set(KVM_REQ_CLOCK_UPDATE, v);
 		return 1;
 	}
 	if (!use_master_clock) {
@@ -1976,7 +1976,7 @@  static void kvmclock_update_fn(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
 }
@@ -1985,7 +1985,7 @@  static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
 {
 	struct kvm *kvm = v->kvm;
 
-	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+	kvm_request_set(KVM_REQ_CLOCK_UPDATE, v);
 	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
 					KVMCLOCK_UPDATE_DELAY);
 }
@@ -2235,7 +2235,7 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 
 		vcpu->arch.time = data;
-		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
 		/* we verify if the enable bit is set... */
 		if (!(data & 1))
@@ -2272,7 +2272,7 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & KVM_MSR_ENABLED))
 			break;
 
-		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_STEAL_UPDATE, vcpu);
 
 		break;
 	case MSR_KVM_PV_EOI_EN:
@@ -2836,7 +2836,7 @@  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
 		vcpu->arch.tsc_offset_adjustment = 0;
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 	}
 
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
@@ -2860,13 +2860,13 @@  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * kvmclock on vcpu->cpu migration
 		 */
 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
-			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
+			kvm_request_set(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 		if (vcpu->cpu != cpu)
 			kvm_migrate_timers(vcpu);
 		vcpu->cpu = cpu;
 	}
 
-	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+	kvm_request_set(KVM_REQ_STEAL_UPDATE, vcpu);
 }
 
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
@@ -2957,7 +2957,7 @@  static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 
 	if (!irqchip_in_kernel(vcpu->kvm)) {
 		kvm_queue_interrupt(vcpu, irq->irq, false);
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 		return 0;
 	}
 
@@ -2972,7 +2972,7 @@  static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -EEXIST;
 
 	vcpu->arch.pending_external_vector = irq->irq;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
@@ -2985,7 +2985,7 @@  static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
 {
-	kvm_make_request(KVM_REQ_SMI, vcpu);
+	kvm_request_set(KVM_REQ_SMI, vcpu);
 
 	return 0;
 }
@@ -3051,7 +3051,7 @@  static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 	if (mce->status & MCI_STATUS_UC) {
 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
-			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+			kvm_request_set(KVM_REQ_TRIPLE_FAULT, vcpu);
 			return 0;
 		}
 		if (banks[1] & MCI_STATUS_VAL)
@@ -3168,7 +3168,7 @@  static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	return 0;
 }
@@ -3371,7 +3371,7 @@  static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 	if (!vcpu->arch.pv_time_enabled)
 		return -EINVAL;
 	vcpu->arch.pvclock_set_guest_stopped_request = true;
-	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+	kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 	return 0;
 }
 
@@ -5277,7 +5277,7 @@  static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	if (unlikely(int_shadow || mask)) {
 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
 		if (!mask)
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
+			kvm_request_set(KVM_REQ_EVENT, vcpu);
 	}
 }
 
@@ -5488,7 +5488,7 @@  static void kvm_smm_changed(struct kvm_vcpu *vcpu)
 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
 
 		/* Process a latched INIT or SMI, if any.  */
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 	}
 
 	kvm_mmu_reset_context(vcpu);
@@ -5732,7 +5732,7 @@  int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		 * because POPF has no interrupt shadow.
 		 */
 		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
-			kvm_make_request(KVM_REQ_EVENT, vcpu);
+			kvm_request_set(KVM_REQ_EVENT, vcpu);
 	} else
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
 
@@ -5873,7 +5873,7 @@  static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
 				continue;
-			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+			kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 			if (vcpu->cpu != smp_processor_id())
 				send_ipi = 1;
 		}
@@ -6016,7 +6016,7 @@  static void pvclock_gtod_update_fn(struct work_struct *work)
 	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
-			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+			kvm_request_set(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 	atomic_set(&kvm_guest_has_master_clock, 0);
 	spin_unlock(&kvm_lock);
 }
@@ -6406,7 +6406,7 @@  static void process_nmi(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 }
 
 #define put_smstate(type, buf, offset, val)			  \
@@ -6641,7 +6641,7 @@  static void enter_smm(struct kvm_vcpu *vcpu)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.smi_pending = true;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 }
 
 void kvm_make_scan_ioapic_request(struct kvm *kvm)
@@ -6725,50 +6725,50 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
-		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_MMU_RELOAD, vcpu))
 			kvm_mmu_unload(vcpu);
-		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_MIGRATE_TIMER, vcpu))
 			__kvm_migrate_timers(vcpu);
-		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
 			kvm_gen_update_masterclock(vcpu->kvm);
-		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
 			kvm_gen_kvmclock_update(vcpu);
-		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_CLOCK_UPDATE, vcpu)) {
 			r = kvm_guest_time_update(vcpu);
 			if (unlikely(r))
 				goto out;
 		}
-		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_MMU_SYNC, vcpu))
 			kvm_mmu_sync_roots(vcpu);
-		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_TLB_FLUSH, vcpu))
 			kvm_vcpu_flush_tlb(vcpu);
-		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_TRIPLE_FAULT, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_APF_HALT, vcpu)) {
 			/* Page is swapped out. Do synthetic halt */
 			vcpu->arch.apf.halted = true;
 			r = 1;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_STEAL_UPDATE, vcpu))
 			record_steal_time(vcpu);
-		if (kvm_check_request(KVM_REQ_SMI, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_SMI, vcpu))
 			process_smi(vcpu);
-		if (kvm_check_request(KVM_REQ_NMI, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
-		if (kvm_check_request(KVM_REQ_PMU, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_PMU, vcpu))
 			kvm_pmu_handle_event(vcpu);
-		if (kvm_check_request(KVM_REQ_PMI, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_PMI, vcpu))
 			kvm_pmu_deliver_pmi(vcpu);
-		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
 			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
 			if (test_bit(vcpu->arch.pending_ioapic_eoi,
 				     vcpu->arch.ioapic_handled_vectors)) {
@@ -6779,23 +6779,23 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 				goto out;
 			}
 		}
-		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_SCAN_IOAPIC, vcpu))
 			vcpu_scan_ioapic(vcpu);
-		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
 			kvm_vcpu_reload_apic_access_page(vcpu);
-		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_HV_CRASH, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
 			r = 0;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_HV_RESET, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
 			r = 0;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
+		if (kvm_request_test_and_clear(KVM_REQ_HV_EXIT, vcpu)) {
 			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
 			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
 			r = 0;
@@ -6807,11 +6807,11 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
 		 * depend on the guest clock being up-to-date
 		 */
-		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
+		if (kvm_request_test_and_clear(KVM_REQ_HV_STIMER, vcpu))
 			kvm_hv_process_stimers(vcpu);
 	}
 
-	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+	if (kvm_request_test_and_clear(KVM_REQ_EVENT, vcpu) || req_int_win) {
 		++vcpu->stat.req_event;
 		kvm_apic_accept_events(vcpu);
 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
@@ -6902,7 +6902,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_load_guest_xcr0(vcpu);
 
 	if (req_immediate_exit) {
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
+		kvm_request_set(KVM_REQ_EVENT, vcpu);
 		smp_send_reschedule(vcpu->cpu);
 	}
 
@@ -6974,7 +6974,7 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}
 
 	if (unlikely(vcpu->arch.tsc_always_catchup))
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+		kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	if (vcpu->arch.apic_attention)
 		kvm_lapic_sync_from_vapic(vcpu);
@@ -7001,7 +7001,7 @@  static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 		if (kvm_x86_ops->post_block)
 			kvm_x86_ops->post_block(vcpu);
 
-		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
+		if (!kvm_request_test_and_clear(KVM_REQ_UNHALT, vcpu))
 			return 1;
 	}
 
@@ -7279,7 +7279,7 @@  int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vcpu->arch.exception.pending = false;
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	return 0;
 }
@@ -7358,7 +7358,7 @@  int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
 	} else
 		vcpu->arch.mp_state = mp_state->mp_state;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
@@ -7378,7 +7378,7 @@  int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 
 	kvm_rip_write(vcpu, ctxt->eip);
 	kvm_set_rflags(vcpu, ctxt->eflags);
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 	return EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -7459,7 +7459,7 @@  int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	    !is_protmode(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 
 	return 0;
 }
@@ -7709,7 +7709,7 @@  void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	vcpu->arch.cr2 = 0;
 
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;
 	vcpu->arch.st.msr_val = 0;
 
@@ -7762,7 +7762,7 @@  int kvm_arch_hardware_enable(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (!stable && vcpu->cpu == smp_processor_id())
-				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+				kvm_request_set(KVM_REQ_CLOCK_UPDATE, vcpu);
 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
 				backwards_tsc = true;
 				if (vcpu->arch.last_host_tsc > max_tsc)
@@ -7816,7 +7816,7 @@  int kvm_arch_hardware_enable(void)
 			kvm_for_each_vcpu(i, vcpu, kvm) {
 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
 				vcpu->arch.last_host_tsc = local_tsc;
-				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+				kvm_request_set(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 			}
 
 			/*
@@ -8447,7 +8447,7 @@  static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	__kvm_set_rflags(vcpu, rflags);
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	kvm_request_set(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
@@ -8548,7 +8548,7 @@  void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
 	    (vcpu->arch.apf.send_user_only &&
 	     kvm_x86_ops->get_cpl(vcpu) == 0))
-		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+		kvm_request_set(KVM_REQ_APF_HALT, vcpu);
 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
 		fault.vector = PF_VECTOR;
 		fault.error_code_valid = true;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8d69d5150748..4cf61ee84b2e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1084,24 +1084,45 @@  static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+/*
+ * An API for setting KVM requests.
+ * The general API design is inspired by bit_* API.
+ *
+ * A request can be set either to itself or to a remote VCPU.  If the request
+ * is set to a remote VCPU, then the VCPU needs to be notified, which is
+ * usually done with kvm_vcpu_kick().
+ * The request can also mean that some data is ready, so a remote request
+ * needs a smp_wmb().  i.e. there are three main types of requests:
+ *  1) local request
+ *  2) remote request with no data (= kick)
+ *  3) remote request with data (= kick + mb)
+ *
+ * TODO:
+ *  - completely encapsulate vcpu->requests
+ *  - do not use memory barrier in (1) and (2)
+ *  - let architectures define custom vcpu kick
+ *  - add kick when setting remote request
+ */
+static inline void kvm_request_set(unsigned req, struct kvm_vcpu *vcpu)
 {
 	/*
-	 * Ensure the rest of the request is published to kvm_check_request's
-	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
+	 * Ensure the rest of the request is published to
+	 * kvm_request_test_and_clear's caller.
+	 * Paired with the smp_mb__after_atomic in kvm_request_test_and_clear.
 	 */
 	smp_wmb();
 	set_bit(req, &vcpu->requests);
 }
 
-static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
+static inline bool kvm_request_test_and_clear(unsigned req, struct kvm_vcpu *vcpu)
 {
 	if (test_bit(req, &vcpu->requests)) {
 		clear_bit(req, &vcpu->requests);
 
 		/*
-		 * Ensure the rest of the request is visible to kvm_check_request's
-		 * caller.  Paired with the smp_wmb in kvm_make_request.
+		 * Ensure the rest of the request is visible to
+		 * kvm_request_test_and_clear's caller.
+		 * Paired with the smp_wmb in kvm_request_set.
 		 */
 		smp_mb__after_atomic();
 		return true;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cc4d6e0dd2a2..0ee1df3bfcb8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -178,7 +178,7 @@  bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 	me = get_cpu();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_make_request(req, vcpu);
+		kvm_request_set(req, vcpu);
 		cpu = vcpu->cpu;
 
 		/* Set ->requests bit before we read ->mode. */
@@ -2127,7 +2127,7 @@  static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
 	if (kvm_arch_vcpu_runnable(vcpu)) {
-		kvm_make_request(KVM_REQ_UNHALT, vcpu);
+		kvm_request_set(KVM_REQ_UNHALT, vcpu);
 		return -EINTR;
 	}
 	if (kvm_cpu_has_pending_timer(vcpu))