@@ -943,7 +943,6 @@ struct kvm_arch {
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
struct kvm_apic_map *apic_map;
- bool apic_map_dirty;
bool apic_access_page_done;
unsigned long apicv_inhibit_reasons;
@@ -81,7 +81,6 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
-void kvm_recalculate_apic_map(struct kvm *kvm);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val);
int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
@@ -169,28 +169,14 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
kvfree(map);
}
-void kvm_recalculate_apic_map(struct kvm *kvm)
+static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
u32 max_id = 255; /* enough space for any xAPIC ID */
- if (!kvm->arch.apic_map_dirty) {
- /*
- * Read kvm->arch.apic_map_dirty before
- * kvm->arch.apic_map
- */
- smp_rmb();
- return;
- }
-
mutex_lock(&kvm->arch.apic_map_lock);
- if (!kvm->arch.apic_map_dirty) {
- /* Someone else has updated the map. */
- mutex_unlock(&kvm->arch.apic_map_lock);
- return;
- }
kvm_for_each_vcpu(i, vcpu, kvm)
if (kvm_apic_present(vcpu))
@@ -255,12 +241,6 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
- /*
- * Write kvm->arch.apic_map before
- * clearing apic->apic_map_dirty
- */
- smp_wmb();
- kvm->arch.apic_map_dirty = false;
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
@@ -282,20 +262,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
else
static_key_slow_inc(&apic_sw_disabled.key);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ recalculate_apic_map(apic->vcpu->kvm);
}
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
kvm_lapic_set_reg(apic, APIC_ID, id << 24);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ recalculate_apic_map(apic->vcpu->kvm);
}
static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
kvm_lapic_set_reg(apic, APIC_LDR, id);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ recalculate_apic_map(apic->vcpu->kvm);
}
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +291,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
kvm_lapic_set_reg(apic, APIC_ID, id);
kvm_lapic_set_reg(apic, APIC_LDR, ldr);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ recalculate_apic_map(apic->vcpu->kvm);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1956,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_DFR:
if (!apic_x2apic_mode(apic)) {
kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
- apic->vcpu->kvm->arch.apic_map_dirty = true;
+ recalculate_apic_map(apic->vcpu->kvm);
} else
ret = 1;
break;
@@ -2082,8 +2062,6 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
}
- kvm_recalculate_apic_map(apic->vcpu->kvm);
-
return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
@@ -2232,7 +2210,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
static_key_slow_dec_deferred(&apic_hw_disabled);
} else {
static_key_slow_inc(&apic_hw_disabled.key);
- vcpu->kvm->arch.apic_map_dirty = true;
+ recalculate_apic_map(vcpu->kvm);
}
}
@@ -2273,7 +2251,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!apic)
return;
- vcpu->kvm->arch.apic_map_dirty = false;
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2325,8 +2302,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.apic_arb_prio = 0;
vcpu->arch.apic_attention = 0;
-
- kvm_recalculate_apic_map(vcpu->kvm);
}
/*
@@ -2556,18 +2531,17 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
struct kvm_lapic *apic = vcpu->arch.apic;
int r;
+
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
r = kvm_apic_state_fixup(vcpu, s, true);
- if (r) {
- kvm_recalculate_apic_map(vcpu->kvm);
+ if (r)
return r;
- }
memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
- kvm_recalculate_apic_map(vcpu->kvm);
+ recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
@@ -374,7 +374,6 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
kvm_lapic_set_base(vcpu, msr_info->data);
- kvm_recalculate_apic_map(vcpu->kvm);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
Guest fails to online a hotplugged CPU with the error:

  smpboot: do_boot_cpu failed(-1) to wakeup CPU#4

It's caused by the fact that kvm_apic_set_state(), which used to call
recalculate_apic_map() unconditionally and thereby pulled the hotplugged
CPU into the apic map, now updates the map only on a state change. No
such change happens in this case, so the apic map update is skipped.

The reverted commit also introduces a race that can lose apic map
update events: if one CPU is already in the process of updating the
apic map while another CPU flags the map as dirty to request a further
update, the first CPU clears the request set by the second CPU once its
in-progress update finishes:

  CPU1: kvm_recalculate_apic_map()
          ... updating ...
  CPU2: apic_map_dirty = true
  CPU1: apic_map_dirty = false
  CPU2: kvm_recalculate_apic_map()
          nop, because apic_map_dirty == false

This reverts commit 4abaffce4d25aa41392d2e81835592726d757857.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  1 -
 arch/x86/kvm/lapic.h            |  1 -
 arch/x86/kvm/lapic.c            | 46 +++++++--------------------------
 arch/x86/kvm/x86.c              |  1 -
 4 files changed, 10 insertions(+), 39 deletions(-)
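
For illustration only, not part of the patch: a minimal user-space C
sketch of the lost-update hazard described above. The names map_dirty,
source_state, recalc_map_batched() and recalc_map_unconditional() are
invented for this example; the batched function mirrors the dirty-flag
scheme being reverted, the unconditional one mirrors the behaviour the
revert restores. map_dirty is left as a plain bool on purpose to mirror
the kernel flag; real user-space code would want atomics.

/* Illustrative sketch, not kernel code; all names here are made up. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static bool map_dirty;		/* analogue of kvm->arch.apic_map_dirty */
static int source_state;	/* state the "map" is derived from      */
static int map;			/* analogue of kvm->arch.apic_map       */

/* Batched scheme (being reverted): rebuild only when flagged dirty. */
static void recalc_map_batched(void)
{
	if (!map_dirty)
		return;

	pthread_mutex_lock(&map_lock);
	map = source_state;	/* rebuild reads the source state here */
	/*
	 * Lost-update window: if another thread changed source_state and
	 * set map_dirty after the read above, clearing the flag here turns
	 * that thread's later recalc_map_batched() call into a nop while
	 * the map is still stale.
	 */
	map_dirty = false;
	pthread_mutex_unlock(&map_lock);
}

/* What the revert restores: always rebuild under the lock. */
static void recalc_map_unconditional(void)
{
	pthread_mutex_lock(&map_lock);
	map = source_state;
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	/* Single-threaded walk-through; the race itself needs two CPUs. */
	source_state = 1;
	map_dirty = true;
	recalc_map_batched();

	source_state = 2;
	recalc_map_unconditional();
	printf("map = %d\n", map);	/* prints "map = 2" */
	return 0;
}

Build with "cc -pthread sketch.c" if you want to poke at it; the point
is only to show why clearing the flag after the rebuild can swallow a
concurrent update request, which the unconditional recalculation under
the lock cannot do.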