@@ -247,7 +247,7 @@ static void ack_flush(void *_completed)
 
 static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
 {
-	if (unlikely(!cpus))
+	if (IS_ENABLED(CONFIG_CPUMASK_OFFSTACK) && unlikely(!cpus))
 		cpus = cpu_online_mask;
 
 	if (cpumask_empty(cpus))
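
For readers outside the cpumask code, the reason the NULL check can be compiled out is the dual definition of cpumask_var_t. A simplified sketch of the two flavors, paraphrasing include/linux/cpumask.h rather than quoting it (the stand-in struct body is illustrative):

	struct cpumask { unsigned long bits[1]; };	/* stand-in; the real bitmap is sized for NR_CPUS */

	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* Heap-allocated: zalloc_cpumask_var() can fail and leave a NULL pointer. */
	typedef struct cpumask *cpumask_var_t;
	#else
	/*
	 * A one-element array on the stack: it decays to a pointer that can
	 * never compare equal to NULL, so IS_ENABLED(CONFIG_CPUMASK_OFFSTACK)
	 * lets the compiler discard the unlikely(!cpus) branch at build time.
	 */
	typedef struct cpumask cpumask_var_t[1];
	#endif
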
@@ -277,6 +277,14 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 
 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
 			continue;
+		/*
+		 * tmp can be NULL if cpumasks are allocated off stack, as
+		 * allocation of the mask is deliberately not fatal; failure
+		 * is handled by falling back to kicking all online CPUs.
+		 */
+		if (IS_ENABLED(CONFIG_CPUMASK_OFFSTACK) && !tmp)
+			continue;
+
 		/*
 		 * Note, the vCPU could get migrated to a different pCPU at any
 		 * point after kvm_request_needs_ipi(), which could result in
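
To see why a NULL tmp is skipped per-vCPU rather than treated as an error, here is an abridged sketch of the caller, modeled on kvm_make_all_cpus_request() in kvm_main.c (parameter list simplified; upstream versions pass additional arguments): the allocation failure is deliberately swallowed, and the NULL mask later reaches kvm_kick_many_cpus(), whose fallback in the first hunk kicks every online CPU.

	bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
	{
		cpumask_var_t cpus;
		bool called;

		/*
		 * Failure is tolerated: with CONFIG_CPUMASK_OFFSTACK=y this
		 * can leave cpus == NULL, which the loop above skips per-vCPU.
		 */
		zalloc_cpumask_var(&cpus, GFP_ATOMIC);

		/*
		 * A NULL mask flows through to kvm_kick_many_cpus(), which
		 * then falls back to cpu_online_mask.
		 */
		called = kvm_make_vcpus_request_mask(kvm, req, cpus);

		free_cpumask_var(cpus);
		return called;
	}
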
@@ -288,7 +296,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 		 * were reading SPTEs _before_ any changes were finalized. See
 		 * kvm_vcpu_kick() for more details on handling requests.
 		 */
-		if (tmp != NULL && kvm_request_needs_ipi(vcpu, req)) {
+		if (kvm_request_needs_ipi(vcpu, req)) {
 			cpu = READ_ONCE(vcpu->cpu);
 			if (cpu != -1 && cpu != me)
 				__cpumask_set_cpu(cpu, tmp);
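
With the early continue above guaranteeing tmp is non-NULL here, the tmp != NULL half of the condition is dead and can be dropped; the remaining race is the one the comment describes, and it is benign. An illustrative timeline (not from the source) of the window between kvm_request_needs_ipi() and the IPI:

	sender CPU                            target vCPU task
	----------                            ----------------
	kvm_request_needs_ipi(vcpu, req)
	                                      migrates; vcpu->cpu = <new pCPU>
	cpu = READ_ONCE(vcpu->cpu)            (old or new value, either is fine)
	__cpumask_set_cpu(cpu, tmp)
	...IPI may hit the previous pCPU, but migration already forced the
	vCPU to OUTSIDE_GUEST_MODE, which is all the IPI is meant to ensure.

READ_ONCE() matters because vcpu->cpu is written concurrently: it pins a single load, so the cpu != -1 and cpu != me checks and __cpumask_set_cpu() all operate on the same snapshot rather than the compiler re-reading a moving value.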