===================================================================
@@ -142,40 +142,6 @@ void kvm_vcpu_ipi(struct kvm_vcpu *vcpu)
put_cpu();
}
-static void ack_flush(void *_completed)
-{
-}
-
-static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
-{
- int i, cpu, me;
- cpumask_var_t cpus;
- bool called = true;
- struct kvm_vcpu *vcpu;
-
- if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
- cpumask_clear(cpus);
-
- spin_lock(&kvm->requests_lock);
- me = smp_processor_id();
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (test_and_set_bit(req, &vcpu->requests))
- continue;
- cpu = vcpu->cpu;
- if (cpus != NULL && cpu != -1 && cpu != me)
- cpumask_set_cpu(cpu, cpus);
- }
- if (unlikely(cpus == NULL))
- smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
- else if (!cpumask_empty(cpus))
- smp_call_function_many(cpus, ack_flush, NULL, 1);
- else
- called = false;
- spin_unlock(&kvm->requests_lock);
- free_cpumask_var(cpus);
- return called;
-}
-
static int kvm_req_wait(void *unused)
{
cpu_relax();
@@ -415,7 +381,6 @@ static struct kvm *kvm_create_vm(void)
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
- spin_lock_init(&kvm->requests_lock);
kvm_io_bus_init(&kvm->pio_bus);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
===================================================================
@@ -157,7 +157,6 @@ struct kvm_irq_routing_table {};
struct kvm {
spinlock_t mmu_lock;
- spinlock_t requests_lock;
struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
int nmemslots;
Obsoleted by kvm_vcpus_request.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
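For reference, the removed make_all_cpus_request() took (kvm, req), set the
request bit on every vcpu, and returned whether any remote CPUs had to be
kicked. A minimal caller sketch, assuming the replacement kvm_vcpus_request()
keeps the same arguments and return contract (that signature is an assumption,
not shown in this patch; kvm_flush_remote_tlbs() is used only as a familiar
example of such a caller):

	/*
	 * Sketch only: assumes kvm_vcpus_request(kvm, req) behaves like the
	 * removed make_all_cpus_request() -- set 'req' on every vcpu and
	 * return true if any remote CPU had to be notified.
	 */
	void kvm_flush_remote_tlbs(struct kvm *kvm)
	{
		if (kvm_vcpus_request(kvm, KVM_REQ_TLB_FLUSH))
			++kvm->stat.remote_tlb_flush;
	}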