@@ -274,8 +274,8 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_USER_RETURN_NOTIFIER
 	select HAVE_GENERIC_VDSO
-	select HOTPLUG_CORE_SYNC_FULL		if SMP
 	select HOTPLUG_SMT			if SMP
+	select HOTPLUG_SPLIT_STARTUP		if SMP
 	select IRQ_FORCED_THREADING
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
 	select NEED_PER_CPU_PAGE_FIRST_CHUNK
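Dropping the direct select of HOTPLUG_CORE_SYNC_FULL loses nothing: the generic HOTPLUG_SPLIT_STARTUP option selects it in turn, so switching x86 to the split bringup model is a strict superset of the previous configuration.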
@@ -40,7 +40,7 @@ struct smp_ops {
 	void (*cleanup_dead_cpu)(unsigned cpu);
 	void (*poll_sync_state)(void);
-	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
+	int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle);
 	int (*cpu_disable)(void);
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
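The rename is also a contract change: cpu_up() was expected to bring the AP fully online and wait for it, while kick_ap_alive() only has to wake the AP and return; the wait and the alive/online synchronization now live in the generic hotplug core.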
@@ -80,11 +80,6 @@ static inline void smp_cpus_done(unsigne
 	smp_ops.smp_cpus_done(max_cpus);
 }
 
-static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-	return smp_ops.cpu_up(cpu, tidle);
-}
-
 static inline int __cpu_disable(void)
 {
 	return smp_ops.cpu_disable();
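With the inline gone, the generic hotplug code no longer enters the architecture through __cpu_up(). Under CONFIG_HOTPLUG_SPLIT_STARTUP the core drives a separate kick step instead; below is a minimal sketch of that caller, paraphrased from kernel/cpu.c in this series (the helper names cpuhp_can_boot_ap() and idle_thread_get() are reproduced from memory, not from this patch):

	/*
	 * Control-CPU side of the split bringup (sketch, not verbatim):
	 * the arch hook only starts the AP; waiting for it to come
	 * online happens later, in the CPUHP_BRINGUP_CPU step.
	 */
	static int cpuhp_kick_ap_alive(unsigned int cpu)
	{
		if (!cpuhp_can_boot_ap(cpu))
			return -EAGAIN;

		return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
	}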
@@ -124,7 +119,7 @@ void native_smp_prepare_cpus(unsigned in
 void calculate_max_logical_packages(void);
 void native_smp_cpus_done(unsigned int max_cpus);
 int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
-int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
 int native_cpu_disable(void);
 void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
@@ -268,7 +268,7 @@ struct smp_ops smp_ops = {
 #endif
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
-	.cpu_up			= native_cpu_up,
+	.kick_ap_alive		= native_kick_ap,
 	.cpu_disable		= native_cpu_disable,
 	.play_dead		= native_play_dead,
 
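In the native ops table only the wiring changes: native_kick_ap(), now declared in asm/smp.h above and made non-static below, is installed directly instead of being reached through the native_cpu_up() wrapper.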
@@ -1052,7 +1052,7 @@ static int do_boot_cpu(int apicid, int c
 	return ret;
 }
 
-static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
+int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	int err;
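Dropping the static qualifier is what lets the smp_ops initializer in smp.c and the pointer comparison in arch_cpuhp_cleanup_kick_cpu() below refer to native_kick_ap(); its body is untouched.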
@@ -1088,15 +1088,15 @@ static int native_kick_ap(unsigned int c
 	return err;
 }
 
-int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
 {
-	return native_kick_ap(cpu, tidle);
+	return smp_ops.kick_ap_alive(cpu, tidle);
 }
 
 void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
 {
 	/* Cleanup possible dangling ends... */
-	if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset)
+	if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset)
 		smpboot_restore_warm_reset_vector();
 }
 
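arch_cpuhp_kick_ap_alive() is the hook the hotplug core calls for the kick step; on x86 it simply dispatches through smp_ops, so bare metal and Xen PV share one entry point. The pointer comparison in arch_cpuhp_cleanup_kick_cpu() confines the warm-reset cleanup to the native path, since the Xen path never touches the warm reset vector. The resulting call chain for the kick step, as a sketch:

	/*
	 * cpuhp_kick_ap_alive(cpu)                      kernel/cpu.c
	 *   -> arch_cpuhp_kick_ap_alive(cpu, idle)      smpboot.c
	 *        -> smp_ops.kick_ap_alive(cpu, idle)
	 *             -> native_kick_ap() or xen_pv_kick_ap()
	 */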
@@ -314,7 +314,7 @@ cpu_initialize_context(unsigned int cpu,
 	return 0;
 }
 
-static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
+static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
 {
 	int rc;
 
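Xen PV gets the same treatment: a pure rename. If memory serves, the body (not shown here) already matched the new contract, ending in the VCPUOP_up hypercall that starts the vCPU and returning without waiting for it, the earlier patches in this series having moved the wait into the core.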
@@ -438,7 +438,7 @@ static const struct smp_ops xen_smp_ops
 	.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
 	.smp_cpus_done = xen_smp_cpus_done,
 
-	.cpu_up = xen_pv_cpu_up,
+	.kick_ap_alive = xen_pv_kick_ap,
 	.cpu_die = xen_pv_cpu_die,
 	.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
 	.poll_sync_state = xen_pv_poll_sync_state,
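With both smp_ops implementations providing kick_ap_alive(), the Kconfig switch in the first hunk can safely enable HOTPLUG_SPLIT_STARTUP for all of x86, Xen PV included.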