@@ -833,16 +833,23 @@ static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
cpuhp_node);
- if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
+ raw_spin_lock(&dsu_pmu->pmu_lock);
+ if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu)) {
+ raw_spin_unlock(&dsu_pmu->pmu_lock);
return 0;
+ }
dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
/* If there are no active CPUs in the DSU, leave IRQ disabled */
- if (dst >= nr_cpu_ids)
+ if (dst >= nr_cpu_ids) {
+ raw_spin_unlock(&dsu_pmu->pmu_lock);
return 0;
+ }
- perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
+ /* dst cannot be in the dying mask, so setting it as the active CPU here blocks parallel teardown */
dsu_pmu_set_active_cpu(dst, dsu_pmu);
+ raw_spin_unlock(&dsu_pmu->pmu_lock);
+ perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
return 0;
}
@@ -858,6 +865,7 @@ static int __init dsu_pmu_init(void)
if (ret < 0)
return ret;
dsu_pmu_cpuhp_state = ret;
+ cpuhp_set_step_parallel(ret);
return platform_driver_register(&dsu_pmu_driver);
}
In the case of a kexec quick reboot, dsu_pmu_cpu_teardown() can run in parallel on multiple CPUs, so a lock is needed to protect against contention on a dsu_pmu. Signed-off-by: Pingfan Liu <kernelfans@gmail.com> Cc: Will Deacon <will@kernel.org> Cc: Mark Rutland <mark.rutland@arm.com> To: linux-arm-kernel@lists.infradead.org To: linux-kernel@vger.kernel.org --- drivers/perf/arm_dsu_pmu.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-)