
[v4,3/5] arm64: Introduce get_cpu_ops() helper function

Message ID 20200226002356.86986-4-gshan@redhat.com (mailing list archive)
State New, archived
Series arm64: Dereference CPU operations indirectly

Commit Message

Gavin Shan Feb. 26, 2020, 12:23 a.m. UTC
This introduces get_cpu_ops() to return the CPU operations for the
given CPU index. For now, it simply returns @cpu_ops[cpu] as before,
so it shouldn't introduce any functional changes.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/cpu_ops.h |  2 +-
 arch/arm64/kernel/cpu_ops.c      |  7 +++-
 arch/arm64/kernel/cpuidle.c      |  9 ++---
 arch/arm64/kernel/setup.c        |  6 ++--
 arch/arm64/kernel/smp.c          | 57 ++++++++++++++++++++++----------
 5 files changed, 55 insertions(+), 26 deletions(-)
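
In short, the @cpu_ops[] array becomes private to cpu_ops.c behind a
trivial accessor, and callers fetch the operations once into a local
instead of dereferencing the array repeatedly. Both excerpts below are
taken verbatim from the patch at the bottom of this page;
boot_secondary() is a representative converted caller:

	/* arch/arm64/kernel/cpu_ops.c: the array is now file-private */
	static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;

	const struct cpu_operations *get_cpu_ops(int cpu)
	{
		return cpu_ops[cpu];
	}

	/* arch/arm64/kernel/smp.c: fetch the ops once, then use them */
	static int boot_secondary(unsigned int cpu, struct task_struct *idle)
	{
		const struct cpu_operations *ops = get_cpu_ops(cpu);

		if (ops->cpu_boot)
			return ops->cpu_boot(cpu);

		return -EOPNOTSUPP;
	}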

Comments

Mark Rutland March 17, 2020, 10:48 a.m. UTC | #1
On Wed, Feb 26, 2020 at 11:23:54AM +1100, Gavin Shan wrote:
> This introduces get_cpu_ops() to return the CPU operations for the
> given CPU index. For now, it simply returns @cpu_ops[cpu] as before,
> so it shouldn't introduce any functional changes.
> 
> Signed-off-by: Gavin Shan <gshan@redhat.com>

Generally this looks good to me; I like that it simplifies the cases
where we get the ops repeatedly today.

I have one comment below.

> @@ -383,6 +392,7 @@ void cpu_die(void)
>  void cpu_die_early(void)
>  {
>  	int cpu = smp_processor_id();
> +	const struct cpu_operations *ops = get_cpu_ops(cpu);
>  
>  	pr_crit("CPU%d: will not boot\n", cpu);
>  
> @@ -392,8 +402,8 @@ void cpu_die_early(void)
>  #ifdef CONFIG_HOTPLUG_CPU
>  	update_cpu_boot_status(CPU_KILL_ME);
>  	/* Check if we can park ourselves */
> -	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
> -		cpu_ops[cpu]->cpu_die(cpu);
> +	if (ops && ops->cpu_die)
> +		ops->cpu_die(cpu);
>  #endif

Can we factor the die logic out into a helper:

| static void __cpu_try_die(int cpu)
| {
| #ifdef CONFIG_HOTPLUG_CPU
| 	const struct cpu_operations *ops = get_cpu_ops(cpu);
| 	if (ops && ops->cpu_die)
| 		ops->cpu_die(cpu);
| #endif
| }

... with cpu_die_early() having:

| if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
| 	update_cpu_boot_status(CPU_KILL_ME);
| 	__cpu_try_die(cpu);
| }

... and likewise in ipi_cpu_crash_stop(), without the
update_cpu_boot_status() ...

> @@ -855,6 +870,10 @@ static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
>  
>  static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
>  {
> +#ifdef CONFIG_HOTPLUG_CPU
> +	const struct cpu_operations *ops;
> +#endif

... where this can go ...

> +
>  #ifdef CONFIG_KEXEC_CORE
>  	crash_save_cpu(regs, cpu);
>  
> @@ -864,8 +883,9 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
>  	sdei_mask_local_cpu();
>  
>  #ifdef CONFIG_HOTPLUG_CPU
> -	if (cpu_ops[cpu]->cpu_die)
> -		cpu_ops[cpu]->cpu_die(cpu);
> +	ops = get_cpu_ops(cpu);
> +	if (ops->cpu_die)
> +		ops->cpu_die(cpu);
>  #endif

... and this can be:

| if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
| 	__cpu_try_die(cpu);

Thanks,
Mark.
Gavin Shan March 18, 2020, 2:22 a.m. UTC | #2
On 3/17/20 9:48 PM, Mark Rutland wrote:
> On Wed, Feb 26, 2020 at 11:23:54AM +1100, Gavin Shan wrote:
>> This introduces get_cpu_ops() to return the CPU operations for the
>> given CPU index. For now, it simply returns @cpu_ops[cpu] as before,
>> so it shouldn't introduce any functional changes.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
> 
> Generally this looks good to me; I like that it simplifies the cases
> where we get the ops repeatedly today.
> 
> I have one comment below.
> 
>> @@ -383,6 +392,7 @@ void cpu_die(void)
>>   void cpu_die_early(void)
>>   {
>>   	int cpu = smp_processor_id();
>> +	const struct cpu_operations *ops = get_cpu_ops(cpu);
>>   
>>   	pr_crit("CPU%d: will not boot\n", cpu);
>>   
>> @@ -392,8 +402,8 @@ void cpu_die_early(void)
>>   #ifdef CONFIG_HOTPLUG_CPU
>>   	update_cpu_boot_status(CPU_KILL_ME);
>>   	/* Check if we can park ourselves */
>> -	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
>> -		cpu_ops[cpu]->cpu_die(cpu);
>> +	if (ops && ops->cpu_die)
>> +		ops->cpu_die(cpu);
>>   #endif
> 
> Can we factor the die logic out into a helper:
> 
> | static void __cpu_try_die(int cpu)
> | {
> | #ifdef CONFIG_HOTPLUG_CPU
> | 	const struct cpu_operations *ops = get_cpu_ops(cpu);
> | 	if (ops && ops->cpu_die)
> | 		ops->cpu_die(cpu);
> | #endif
> | }
> 
> ... with cpu_die_early() having:
> 
> | if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
> | 	update_cpu_boot_status(CPU_KILL_ME);
> | 	__cpu_try_die(cpu);
> | }
> 
> ... and likewise in ipi_cpu_crash_stop(), without the
> update_cpu_boot_status() ...
> 
>> @@ -855,6 +870,10 @@ static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
>>   
>>   static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
>>   {
>> +#ifdef CONFIG_HOTPLUG_CPU
>> +	const struct cpu_operations *ops;
>> +#endif
> 
> ... where this can go ...
> 
>> +
>>   #ifdef CONFIG_KEXEC_CORE
>>   	crash_save_cpu(regs, cpu);
>>   
>> @@ -864,8 +883,9 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
>>   	sdei_mask_local_cpu();
>>   
>>   #ifdef CONFIG_HOTPLUG_CPU
>> -	if (cpu_ops[cpu]->cpu_die)
>> -		cpu_ops[cpu]->cpu_die(cpu);
>> +	ops = get_cpu_ops(cpu);
>> +	if (ops->cpu_die)
>> +		ops->cpu_die(cpu);
>>   #endif
> 
> ... and this can be:
> 
> | if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
> | 	__cpu_try_die(cpu);
> 
> Thanks,
> Mark.
> 

Thanks for the detailed comments. With them, the code looks cleaner. I'll
have this in the next revision (v5).

Thanks,
Gavin
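
For reference, Mark's fragments above assemble into something like the
following. This is a sketch of the shape v5 might take, not the actual
follow-up patch. Using IS_ENABLED() at the call sites instead of #ifdef
keeps the code visible to the compiler (and type-checked) in every
configuration, while the branch still compiles away to nothing when
CONFIG_HOTPLUG_CPU is not set:

	static void __cpu_try_die(int cpu)
	{
	#ifdef CONFIG_HOTPLUG_CPU
		const struct cpu_operations *ops = get_cpu_ops(cpu);

		/* Park the CPU via the mechanism-specific hook, if any */
		if (ops && ops->cpu_die)
			ops->cpu_die(cpu);
	#endif
	}

	/* In cpu_die_early(): */
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		update_cpu_boot_status(CPU_KILL_ME);
		/* Check if we can park ourselves */
		__cpu_try_die(cpu);
	}

	/* In ipi_cpu_crash_stop(), where the #ifdef'd ops declaration
	 * can then be dropped entirely: */
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		__cpu_try_die(cpu);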

Patch

diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index baa13b5db2ca..d28e8f37d3b4 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -55,8 +55,8 @@  struct cpu_operations {
 #endif
 };
 
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
 int __init init_cpu_ops(int cpu);
+extern const struct cpu_operations *get_cpu_ops(int cpu);
 
 static inline void __init init_bootcpu_ops(void)
 {
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index a6c3c816b618..e133011f64b5 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -20,7 +20,7 @@  extern const struct cpu_operations acpi_parking_protocol_ops;
 #endif
 extern const struct cpu_operations cpu_psci_ops;
 
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+static const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 
 static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
 	&smp_spin_table_ops,
@@ -111,3 +111,8 @@  int __init init_cpu_ops(int cpu)
 
 	return 0;
 }
+
+const struct cpu_operations *get_cpu_ops(int cpu)
+{
+	return cpu_ops[cpu];
+}
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index e4d6af2fdec7..b512b5503f6e 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -18,11 +18,11 @@ 
 
 int arm_cpuidle_init(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 	int ret = -EOPNOTSUPP;
 
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
-			cpu_ops[cpu]->cpu_init_idle)
-		ret = cpu_ops[cpu]->cpu_init_idle(cpu);
+	if (ops && ops->cpu_suspend && ops->cpu_init_idle)
+		ret = ops->cpu_init_idle(cpu);
 
 	return ret;
 }
@@ -37,8 +37,9 @@  int arm_cpuidle_init(unsigned int cpu)
 int arm_cpuidle_suspend(int index)
 {
 	int cpu = smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 
-	return cpu_ops[cpu]->cpu_suspend(index);
+	return ops->cpu_suspend(index);
 }
 
 #ifdef CONFIG_ACPI
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f66bd260cce8..3fd2c11c09fc 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -371,8 +371,10 @@  void __init setup_arch(char **cmdline_p)
 static inline bool cpu_can_disable(unsigned int cpu)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
-		return cpu_ops[cpu]->cpu_can_disable(cpu);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops && ops->cpu_can_disable)
+		return ops->cpu_can_disable(cpu);
 #endif
 	return false;
 }
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6f8477d7f3be..5e1af1a3c521 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -93,8 +93,10 @@  static inline int op_cpu_kill(unsigned int cpu)
  */
 static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	if (cpu_ops[cpu]->cpu_boot)
-		return cpu_ops[cpu]->cpu_boot(cpu);
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
+	if (ops->cpu_boot)
+		return ops->cpu_boot(cpu);
 
 	return -EOPNOTSUPP;
 }
@@ -196,6 +198,7 @@  asmlinkage notrace void secondary_start_kernel(void)
 {
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
+	const struct cpu_operations *ops;
 	unsigned int cpu;
 
 	cpu = task_cpu(current);
@@ -227,8 +230,9 @@  asmlinkage notrace void secondary_start_kernel(void)
 	 */
 	check_local_cpu_capabilities();
 
-	if (cpu_ops[cpu]->cpu_postboot)
-		cpu_ops[cpu]->cpu_postboot();
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_postboot)
+		ops->cpu_postboot();
 
 	/*
 	 * Log the CPU info before it is marked online and might get read.
@@ -266,19 +270,21 @@  asmlinkage notrace void secondary_start_kernel(void)
 #ifdef CONFIG_HOTPLUG_CPU
 static int op_cpu_disable(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
 	/*
 	 * If we don't have a cpu_die method, abort before we reach the point
 	 * of no return. CPU0 may not have an cpu_ops, so test for it.
 	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+	if (!ops || !ops->cpu_die)
 		return -EOPNOTSUPP;
 
 	/*
 	 * We may need to abort a hot unplug for some other mechanism-specific
 	 * reason.
 	 */
-	if (cpu_ops[cpu]->cpu_disable)
-		return cpu_ops[cpu]->cpu_disable(cpu);
+	if (ops->cpu_disable)
+		return ops->cpu_disable(cpu);
 
 	return 0;
 }
@@ -314,15 +320,17 @@  int __cpu_disable(void)
 
 static int op_cpu_kill(unsigned int cpu)
 {
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
+
 	/*
 	 * If we have no means of synchronising with the dying CPU, then assume
 	 * that it is really dead. We can only wait for an arbitrary length of
 	 * time and hope that it's dead, so let's skip the wait and just hope.
 	 */
-	if (!cpu_ops[cpu]->cpu_kill)
+	if (!ops->cpu_kill)
 		return 0;
 
-	return cpu_ops[cpu]->cpu_kill(cpu);
+	return ops->cpu_kill(cpu);
 }
 
 /*
@@ -357,6 +365,7 @@  void __cpu_die(unsigned int cpu)
 void cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 
 	idle_task_exit();
 
@@ -370,7 +379,7 @@  void cpu_die(void)
 	 * mechanism must perform all required cache maintenance to ensure that
 	 * no dirty lines are lost in the process of shutting down the CPU.
 	 */
-	cpu_ops[cpu]->cpu_die(cpu);
+	ops->cpu_die(cpu);
 
 	BUG();
 }
@@ -383,6 +392,7 @@  void cpu_die(void)
 void cpu_die_early(void)
 {
 	int cpu = smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(cpu);
 
 	pr_crit("CPU%d: will not boot\n", cpu);
 
@@ -392,8 +402,8 @@  void cpu_die_early(void)
 #ifdef CONFIG_HOTPLUG_CPU
 	update_cpu_boot_status(CPU_KILL_ME);
 	/* Check if we can park ourselves */
-	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
+	if (ops && ops->cpu_die)
+		ops->cpu_die(cpu);
 #endif
 	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);
 
@@ -488,10 +498,13 @@  static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
  */
 static int __init smp_cpu_setup(int cpu)
 {
+	const struct cpu_operations *ops;
+
 	if (init_cpu_ops(cpu))
 		return -ENODEV;
 
-	if (cpu_ops[cpu]->cpu_init(cpu))
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_init(cpu))
 		return -ENODEV;
 
 	set_cpu_possible(cpu, true);
@@ -714,6 +727,7 @@  void __init smp_init_cpus(void)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	const struct cpu_operations *ops;
 	int err;
 	unsigned int cpu;
 	unsigned int this_cpu;
@@ -744,10 +758,11 @@  void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (cpu == smp_processor_id())
 			continue;
 
-		if (!cpu_ops[cpu])
+		ops = get_cpu_ops(cpu);
+		if (!ops)
 			continue;
 
-		err = cpu_ops[cpu]->cpu_prepare(cpu);
+		err = ops->cpu_prepare(cpu);
 		if (err)
 			continue;
 
@@ -855,6 +870,10 @@  static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
 
 static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 {
+#ifdef CONFIG_HOTPLUG_CPU
+	const struct cpu_operations *ops;
+#endif
+
 #ifdef CONFIG_KEXEC_CORE
 	crash_save_cpu(regs, cpu);
 
@@ -864,8 +883,9 @@  static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
 	sdei_mask_local_cpu();
 
 #ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_ops[cpu]->cpu_die)
-		cpu_ops[cpu]->cpu_die(cpu);
+	ops = get_cpu_ops(cpu);
+	if (ops->cpu_die)
+		ops->cpu_die(cpu);
 #endif
 
 	/* just in case */
@@ -1044,8 +1064,9 @@  static bool have_cpu_die(void)
 {
 #ifdef CONFIG_HOTPLUG_CPU
 	int any_cpu = raw_smp_processor_id();
+	const struct cpu_operations *ops = get_cpu_ops(any_cpu);
 
-	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
+	if (ops && ops->cpu_die)
 		return true;
 #endif
 	return false;