@@ -409,7 +409,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
}
static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
- unsigned long bitmap)
+ unsigned long *bitmap)
{
int last;
int next;
@@ -421,7 +421,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
do {
if (++next == KVM_MAX_VCPUS)
next = 0;
- if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
+ if (kvm->vcpus[next] == NULL || !test_bit(next, bitmap))
continue;
apic = kvm->vcpus[next]->arch.apic;
if (apic && apic_enabled(apic))
@@ -437,7 +437,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
}
struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
- unsigned long bitmap)
+ unsigned long *bitmap)
{
struct kvm_lapic *apic;
@@ -508,7 +508,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
}
if (delivery_mode == APIC_DM_LOWEST) {
- target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
+ target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, &lpr_map);
if (target != NULL)
__apic_accept_irq(target->arch.apic, delivery_mode,
vector, level, trig_mode);
@@ -354,7 +354,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
union kvm_ioapic_redirect_entry *entry,
- u32 *deliver_bitmask);
+ unsigned long *deliver_bitmask);
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
@@ -162,7 +162,7 @@ static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
}
void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
- u8 dest_mode, u32 *mask)
+ u8 dest_mode, unsigned long *mask)
{
int i;
struct kvm *kvm = ioapic->kvm;
@@ -203,7 +203,7 @@ void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
- u32 deliver_bitmask;
+ unsigned long deliver_bitmask;
struct kvm_vcpu *vcpu;
int vcpu_id, r = 0;
@@ -65,12 +65,12 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
}
struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
- unsigned long bitmap);
+ unsigned long *bitmap);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
int kvm_ioapic_init(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
void kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
- u8 dest_mode, u32 *mask);
+ u8 dest_mode, unsigned long *mask);
#endif
@@ -45,7 +45,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
union kvm_ioapic_redirect_entry *entry,
- u32 *deliver_bitmask)
+ unsigned long *deliver_bitmask)
{
struct kvm_vcpu *vcpu;
@@ -55,7 +55,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
switch (entry->fields.delivery_mode) {
case IOAPIC_LOWEST_PRIORITY:
vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
- entry->fields.vector, *deliver_bitmask);
+ entry->fields.vector, deliver_bitmask);
*deliver_bitmask = 1 << vcpu->vcpu_id;
break;
case IOAPIC_FIXED:
@@ -76,7 +76,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm_vcpu *vcpu;
struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
union kvm_ioapic_redirect_entry entry;
- u32 deliver_bitmask;
+ unsigned long deliver_bitmask;
BUG_ON(!ioapic);
The bitmask would then be usable with the standard bit operations, and could easily be extended if KVM_MAX_VCPUS is increased.

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/kvm/lapic.c     |    8 ++++----
 include/linux/kvm_host.h |    2 +-
 virt/kvm/ioapic.c        |    4 ++--
 virt/kvm/ioapic.h        |    4 ++--
 virt/kvm/irq_comm.c      |    6 +++---
 5 files changed, 12 insertions(+), 12 deletions(-)