[kvm-unit-tests,v2,2/6] lib: s390x: smp: refactor smp functions to accept indexes

Message ID: 20220204130855.39520-3-imbrenda@linux.ibm.com
State: New, archived
Series: s390x: smp: use CPU indexes instead of addresses

Commit Message

Claudio Imbrenda Feb. 4, 2022, 1:08 p.m. UTC
Refactor all the smp_* functions to accept CPU indexes instead of CPU
addresses.

Add SIGP wrappers to use indexes instead of addresses. Raw SIGP calls
using addresses are still possible.

Add a few other useful functions to deal with CPU indexes.

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
---
 lib/s390x/smp.h |  20 ++++---
 lib/s390x/smp.c | 148 ++++++++++++++++++++++++++++--------------------
 2 files changed, 99 insertions(+), 69 deletions(-)
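
For illustration, a test using the new index-based API might look like the
sketch below. This is not part of the patch: secondary_payload() and
example() are hypothetical names, and the usual kvm-unit-tests helpers
(report(), extract_psw_mask(), sigp_retry()) are assumed to be available
from the included headers.

#include <libcflat.h>
#include <asm/arch_def.h>
#include <asm/sigp.h>
#include <smp.h>

/* hypothetical payload run on the secondary CPU, for illustration only */
static void secondary_payload(void)
{
	/* test work would run here */
}

static void example(void)
{
	struct psw psw = {
		.mask = extract_psw_mask(),
		.addr = (uint64_t)secondary_payload,
	};

	/* index 0 is the boot CPU, so index 1 is the first secondary CPU */
	smp_cpu_setup(1, psw);
	report(!smp_cpu_stopped(1), "CPU with index 1 is running");

	/* raw SIGP by CPU address is still possible via smp_cpu_addr() */
	sigp_retry(smp_cpu_addr(1), SIGP_SENSE, 0, NULL);

	smp_cpu_stop(1);
	smp_cpu_destroy(1);
}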

Comments

Nico Boehr Feb. 14, 2022, 4:05 p.m. UTC | #1
On Fri, 2022-02-04 at 14:08 +0100, Claudio Imbrenda wrote:
> Refactor all the smp_* functions to accept CPU indexes instead of CPU
> addresses.
> 
> Add SIGP wrappers to use indexes instead of addresses. Raw SIGP calls
> using addresses are still possible.
> 
> Add a few other useful functions to deal with CPU indexes.
> 
> Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>

Reviewed-by: Nico Boehr <nrb@linux.ibm.com>
Steffen Eiden Feb. 15, 2022, 11:09 a.m. UTC | #2
On 2/4/22 14:08, Claudio Imbrenda wrote:
> Refactor all the smp_* functions to accept CPU indexes instead of CPU
> addresses.
> 
> Add SIGP wrappers to use indexes instead of addresses. Raw SIGP calls
> using addresses are still possible.
> 
> Add a few other useful functions to deal with CPU indexes.
> 
> Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> ---
>   lib/s390x/smp.h |  20 ++++---
>   lib/s390x/smp.c | 148 ++++++++++++++++++++++++++++--------------------
>   2 files changed, 99 insertions(+), 69 deletions(-)
> 
> diff --git a/lib/s390x/smp.h b/lib/s390x/smp.h
> index a2609f11..1e69a7de 100644
> --- a/lib/s390x/smp.h
> +++ b/lib/s390x/smp.h
> @@ -37,15 +37,19 @@ struct cpu_status {
>   
>   int smp_query_num_cpus(void);
>   struct cpu *smp_cpu_from_addr(uint16_t addr);
> -bool smp_cpu_stopped(uint16_t addr);
> -bool smp_sense_running_status(uint16_t addr);
> -int smp_cpu_restart(uint16_t addr);
> -int smp_cpu_start(uint16_t addr, struct psw psw);
> -int smp_cpu_stop(uint16_t addr);
> -int smp_cpu_stop_store_status(uint16_t addr);
> -int smp_cpu_destroy(uint16_t addr);
> -int smp_cpu_setup(uint16_t addr, struct psw psw);
> +struct cpu *smp_cpu_from_idx(uint16_t idx);
> +uint16_t smp_cpu_addr(uint16_t idx);
> +bool smp_cpu_stopped(uint16_t idx);
> +bool smp_sense_running_status(uint16_t idx);
> +int smp_cpu_restart(uint16_t idx);
> +int smp_cpu_start(uint16_t idx, struct psw psw);
> +int smp_cpu_stop(uint16_t idx);
> +int smp_cpu_stop_store_status(uint16_t idx);
> +int smp_cpu_destroy(uint16_t idx);
> +int smp_cpu_setup(uint16_t idx, struct psw psw);
>   void smp_teardown(void);
>   void smp_setup(void);
> +int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
> +int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
>   
>   #endif
> diff --git a/lib/s390x/smp.c b/lib/s390x/smp.c
> index eae742d2..dde79274 100644
> --- a/lib/s390x/smp.c
> +++ b/lib/s390x/smp.c
> @@ -29,11 +29,28 @@ static struct spinlock lock;
>   
>   extern void smp_cpu_setup_state(void);
>   
> +static void check_idx(uint16_t idx)
> +{
> +	assert(idx < smp_query_num_cpus());
> +}
> +
>   int smp_query_num_cpus(void)
>   {
>   	return sclp_get_cpu_num();
>   }
>   
> +int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
> +{
> +	check_idx(idx);
> +	return sigp(cpus[idx].addr, order, parm, status);
> +}
> +
> +int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
> +{
> +	check_idx(idx);
> +	return sigp_retry(cpus[idx].addr, order, parm, status);
> +}
> +
>   struct cpu *smp_cpu_from_addr(uint16_t addr)
>   {
>   	int i, num = smp_query_num_cpus();
> @@ -45,174 +62,183 @@ struct cpu *smp_cpu_from_addr(uint16_t addr)
>   	return NULL;
>   }
> 
> -bool smp_cpu_stopped(uint16_t addr)
> +struct cpu *smp_cpu_from_idx(uint16_t idx)
> +{
> +	check_idx(idx);
> +	return &cpus[idx];
> +}
> +
> +uint16_t smp_cpu_addr(uint16_t idx)
> +{
> +	check_idx(idx);
> +	return cpus[idx].addr;
> +}
> +
> +bool smp_cpu_stopped(uint16_t idx)
>   {
>   	uint32_t status;
>   
> -	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
> +	if (smp_sigp(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
>   		return false;
>   	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
>   }
>   
> -bool smp_sense_running_status(uint16_t addr)
> +bool smp_sense_running_status(uint16_t idx)
>   {
> -	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
> +	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
>   		return true;
>   	/* Status stored condition code is equivalent to cpu not running. */
>   	return false;
>   }
>   
> -static int smp_cpu_stop_nolock(uint16_t addr, bool store)
> +static int smp_cpu_stop_nolock(uint16_t idx, bool store)
>   {
> -	struct cpu *cpu;
>   	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;
>   
> -	cpu = smp_cpu_from_addr(addr);
> -	if (!cpu || addr == cpus[0].addr)
> +	/* refuse to work on the boot CPU */
> +	if (idx == 0)
>   		return -1;
>   
> -	if (sigp_retry(addr, order, 0, NULL))
> +	if (smp_sigp_retry(idx, order, 0, NULL))
>   		return -1;
>   
> -	while (!smp_cpu_stopped(addr))
> +	while (!smp_cpu_stopped(idx))
>   		mb();
> -	cpu->active = false;
> +	/* idx has been already checked by the smp_* functions called above */
> +	cpus[idx].active = false;
>   	return 0;
>   }
>   
> -int smp_cpu_stop(uint16_t addr)
> +int smp_cpu_stop(uint16_t idx)
>   {
>   	int rc;
>   
>   	spin_lock(&lock);
> -	rc = smp_cpu_stop_nolock(addr, false);
> +	rc = smp_cpu_stop_nolock(idx, false);
>   	spin_unlock(&lock);
>   	return rc;
>   }
>   
> -int smp_cpu_stop_store_status(uint16_t addr)
> +int smp_cpu_stop_store_status(uint16_t idx)
>   {
>   	int rc;
>   
>   	spin_lock(&lock);
> -	rc = smp_cpu_stop_nolock(addr, true);
> +	rc = smp_cpu_stop_nolock(idx, true);
>   	spin_unlock(&lock);
>   	return rc;
>   }
>   
> -static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
> +static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
>   {
>   	int rc;
> -	struct cpu *cpu = smp_cpu_from_addr(addr);
>   
> -	if (!cpu)
> -		return -1;
> +	check_idx(idx);
>   	if (psw) {
> -		cpu->lowcore->restart_new_psw.mask = psw->mask;
> -		cpu->lowcore->restart_new_psw.addr = psw->addr;
> +		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
> +		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
>   	}
>   	/*
>   	 * Stop the cpu, so we don't have a race between a running cpu
>   	 * and the restart in the test that checks if the cpu is
>   	 * running after the restart.
>   	 */
> -	smp_cpu_stop_nolock(addr, false);
> -	rc = sigp(addr, SIGP_RESTART, 0, NULL);
> +	smp_cpu_stop_nolock(idx, false);
> +	rc = sigp(cpus[idx].addr, SIGP_RESTART, 0, NULL);

What about using the smp wrapper 'smp_sigp(idx, SIGP_RESTART, 0, NULL)' 
here as well?
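
A sketch of smp_cpu_restart_nolock() with that change applied, based on the
hunk above (only the SIGP_RESTART line differs):

static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
{
	int rc;

	check_idx(idx);
	if (psw) {
		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
	}
	/* stop first so the restart check below doesn't race a running cpu */
	smp_cpu_stop_nolock(idx, false);
	/* wrapper instead of raw sigp(): re-validates idx via check_idx() */
	rc = smp_sigp(idx, SIGP_RESTART, 0, NULL);
	if (rc)
		return rc;
	/* the order was accepted; wait until the cpu is actually running */
	while (smp_cpu_stopped(idx))
		mb();
	cpus[idx].active = true;
	return 0;
}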


>   	if (rc)
>   		return rc;
>   	/*
>   	 * The order has been accepted, but the actual restart may not
>   	 * have been performed yet, so wait until the cpu is running.
>   	 */
> -	while (smp_cpu_stopped(addr))
> +	while (smp_cpu_stopped(idx))
>   		mb();
> -	cpu->active = true;
> +	cpus[idx].active = true;
>   	return 0;
>   }
>   
> -int smp_cpu_restart(uint16_t addr)
> +int smp_cpu_restart(uint16_t idx)
>   {
>   	int rc;
>   
>   	spin_lock(&lock);
> -	rc = smp_cpu_restart_nolock(addr, NULL);
> +	rc = smp_cpu_restart_nolock(idx, NULL);
>   	spin_unlock(&lock);
>   	return rc;
>   }
>   
> -int smp_cpu_start(uint16_t addr, struct psw psw)
> +int smp_cpu_start(uint16_t idx, struct psw psw)
>   {
>   	int rc;
>   
>   	spin_lock(&lock);
> -	rc = smp_cpu_restart_nolock(addr, &psw);
> +	rc = smp_cpu_restart_nolock(idx, &psw);
>   	spin_unlock(&lock);
>   	return rc;
>   }
>   
> -int smp_cpu_destroy(uint16_t addr)
> +int smp_cpu_destroy(uint16_t idx)
>   {
> -	struct cpu *cpu;
>   	int rc;
>   
>   	spin_lock(&lock);
> -	rc = smp_cpu_stop_nolock(addr, false);
> +	rc = smp_cpu_stop_nolock(idx, false);
>   	if (!rc) {
> -		cpu = smp_cpu_from_addr(addr);
> -		free_pages(cpu->lowcore);
> -		free_pages(cpu->stack);
> -		cpu->lowcore = (void *)-1UL;
> -		cpu->stack = (void *)-1UL;
> +		free_pages(cpus[idx].lowcore);
> +		free_pages(cpus[idx].stack);
> +		cpus[idx].lowcore = (void *)-1UL;
> +		cpus[idx].stack = (void *)-1UL;
>   	}
>   	spin_unlock(&lock);
>   	return rc;
>   }
>   
> -int smp_cpu_setup(uint16_t addr, struct psw psw)
> +static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
>   {
>   	struct lowcore *lc;
> -	struct cpu *cpu;
> -	int rc = -1;
> -
> -	spin_lock(&lock);
> -
> -	if (!cpus)
> -		goto out;
>   
> -	cpu = smp_cpu_from_addr(addr);
> -
> -	if (!cpu || cpu->active)
> -		goto out;
> +	if (cpus[idx].active)
> +		return -1;
>   
> -	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);
> +	sigp_retry(cpus[idx].addr, SIGP_INITIAL_CPU_RESET, 0, NULL);

You may want to use the smp wrapper 'smp_sigp_retry' here.

>   
>   	lc = alloc_pages_flags(1, AREA_DMA31);
> -	cpu->lowcore = lc;
> -	memset(lc, 0, PAGE_SIZE * 2);
> -	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
> +	cpus[idx].lowcore = lc;
> +	sigp_retry(cpus[idx].addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
smp_sigp_retry

>   
>   	/* Copy all exception psws. */
>   	memcpy(lc, cpus[0].lowcore, 512);
>   
>   	/* Setup stack */
> -	cpu->stack = (uint64_t *)alloc_pages(2);
> +	cpus[idx].stack = (uint64_t *)alloc_pages(2);
>   
>   	/* Start without DAT and any other mask bits. */
> -	cpu->lowcore->sw_int_psw.mask = psw.mask;
> -	cpu->lowcore->sw_int_psw.addr = psw.addr;
> -	cpu->lowcore->sw_int_grs[14] = psw.addr;
> -	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
> +	lc->sw_int_psw.mask = psw.mask;
> +	lc->sw_int_psw.addr = psw.addr;
> +	lc->sw_int_grs[14] = psw.addr;
> +	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
>   	lc->restart_new_psw.mask = PSW_MASK_64;
>   	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
>   	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);
>   
>   	/* Start processing */
> -	smp_cpu_restart_nolock(addr, NULL);
> +	smp_cpu_restart_nolock(idx, NULL);
>   	/* Wait until the cpu has finished setup and started the provided psw */
>   	while (lc->restart_new_psw.addr != psw.addr)
>   		mb();
> -	rc = 0;
> -out:
> +
> +	return 0;
> +}
> +
> +int smp_cpu_setup(uint16_t idx, struct psw psw)
> +{
> +	int rc = -1;
> +
> +	spin_lock(&lock);
> +	if (cpus) {
> +		check_idx(idx);
> +		rc = smp_cpu_setup_nolock(idx, psw);
> +	}
>   	spin_unlock(&lock);
>   	return rc;
>   }
> 

With my nits fixed:

Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
Claudio Imbrenda Feb. 15, 2022, 11:23 a.m. UTC | #3
On Tue, 15 Feb 2022 12:09:53 +0100
Steffen Eiden <seiden@linux.ibm.com> wrote:

[...]

> What about using the smp wrapper 'smp_sigp(idx, SIGP_RESTART, 0, NULL)' 
> here as well?

[...]

> With my nits fixed:

maybe I should add a comment explaining why I did not use the smp_
variants.

the reason is that the smp_ variants check the validity of the CPU
index. but in those places, we have already checked (directly or
indirectly) that the index is valid, so I save one useless check.

on the other hand, I don't know if it makes sense to optimize for that,
since it's not a hot path, and one extra check will not kill the
performance.

> 
> Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
Steffen Eiden Feb. 15, 2022, 11:43 a.m. UTC | #4
On 2/15/22 12:23, Claudio Imbrenda wrote:
> On Tue, 15 Feb 2022 12:09:53 +0100
> Steffen Eiden <seiden@linux.ibm.com> wrote:
> 
> [...]
> 
>> What about using the smp wrapper 'smp_sigp(idx, SIGP_RESTART, 0, NULL)'
>> here as well?
> 
> [...]
> 
>> With my nits fixed:
> 
> maybe I should add a comment explaining why I did not use the smp_
> variants.
> 
> the reason is that the smp_ variants check the validity of the CPU
> index. but in those places, we have already checked (directly or
> indirectly) that the index is valid, so I save one useless check.
> 
> on the other hand, I don't know if it makes sense to optimize for that,
> since it's not a hot path, and one extra check will not kill the
> performance.
>
I would prefer the use of the smp_ variant. The extra assert won't 
clutter the output and the code is more consistent.
However, a short comment is also fine for me if you prefer that.


>>
>> Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
>
Claudio Imbrenda Feb. 15, 2022, 11:54 a.m. UTC | #5
On Tue, 15 Feb 2022 12:43:15 +0100
Steffen Eiden <seiden@linux.ibm.com> wrote:

> On 2/15/22 12:23, Claudio Imbrenda wrote:
> > On Tue, 15 Feb 2022 12:09:53 +0100
> > Steffen Eiden <seiden@linux.ibm.com> wrote:
> > 
> > [...]
> >   
> >> What about using the smp wrapper 'smp_sigp(idx, SIGP_RESTART, 0, NULL)'
> >> here as well?  
> > 
> > [...]
> >   
> >> With my nits fixed:  
> > 
> > maybe I should add a comment explaining why I did not use the smp_
> > variants.
> > 
> > the reason is that the smp_ variants check the validity of the CPU
> > index. but in those places, we have already checked (directly or
> > indirectly) that the index is valid, so I save one useless check.  
> > 
> > on the other hand, I don't know if it makes sense to optimize for that,
> > since it's not a hot path, and one extra check will not kill the
> > performance.
> >  
> I would prefer the use of the smp_ variant. The extra assert won't 
> clutter the output and the code is more consistent.
> However, a short comment is also fine for me if you prefer that.

I guess I'll use the smp_ variant and add a few lines in the patch
description to explain that we're doing some extra checks, but the code
is more readable

> 
> >>
> >> Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>  
> >
Janosch Frank Feb. 15, 2022, 3:11 p.m. UTC | #6
On 2/15/22 12:54, Claudio Imbrenda wrote:
> On Tue, 15 Feb 2022 12:43:15 +0100
> Steffen Eiden <seiden@linux.ibm.com> wrote:
> 
>> On 2/15/22 12:23, Claudio Imbrenda wrote:
>>> On Tue, 15 Feb 2022 12:09:53 +0100
>>> Steffen Eiden <seiden@linux.ibm.com> wrote:
>>>
>>> [...]
>>>    
>>>> What about using the smp wrapper 'smp_sigp(idx, SIGP_RESTART, 0, NULL)'
>>>> here as well?
>>>
>>> [...]
>>>    
>>>> With my nits fixed:
>>>
>>> maybe I should add a comment explaining why I did not use the smp_
>>> variants.
>>>
>>> the reason is that the smp_ variants check the validity of the CPU
>>> index. but in those places, we have already checked (directly or
>>> indirectly) that the index is valid, so I save one useless check.
>>>
>>> on the other hand, I don't know if it makes sense to optimize for that,
>>> since it's not a hot path, and one extra check will not kill the
>>> performance.
>>>   
>> I would prefer the use of the smp_ variant. The extra assert won't
>> clutter the output and the code is more consistent.
>> However, a short comment is also fine for me if you prefer that.
> 
> I guess I'll use the smp_ variant and add a few lines in the patch
> description to explain that we're doing some extra checks, but the code
> is more readable
> 
>>
>>>>
>>>> Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
>>>    
> 

Doesn't make a difference to me, as you use cpu.addr in the sigp_, which 
tells me it's a cpu address and not an idx.
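
To make the addr/idx distinction concrete: the two calls below are
equivalent; the wrapper just resolves the index and asserts it is valid.
A minimal sketch (the function name is illustrative, the declarations come
from the patch below):

static void sense_by_idx_and_by_addr(uint16_t idx)
{
	uint32_t status = 0;

	/* new wrapper: takes a cpu index, resolves it to the cpu address */
	smp_sigp(idx, SIGP_SENSE, 0, &status);

	/* equivalent raw call: the caller translates the index explicitly */
	sigp(smp_cpu_addr(idx), SIGP_SENSE, 0, &status);
}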

Patch

diff --git a/lib/s390x/smp.h b/lib/s390x/smp.h
index a2609f11..1e69a7de 100644
--- a/lib/s390x/smp.h
+++ b/lib/s390x/smp.h
@@ -37,15 +37,19 @@  struct cpu_status {
 
 int smp_query_num_cpus(void);
 struct cpu *smp_cpu_from_addr(uint16_t addr);
-bool smp_cpu_stopped(uint16_t addr);
-bool smp_sense_running_status(uint16_t addr);
-int smp_cpu_restart(uint16_t addr);
-int smp_cpu_start(uint16_t addr, struct psw psw);
-int smp_cpu_stop(uint16_t addr);
-int smp_cpu_stop_store_status(uint16_t addr);
-int smp_cpu_destroy(uint16_t addr);
-int smp_cpu_setup(uint16_t addr, struct psw psw);
+struct cpu *smp_cpu_from_idx(uint16_t idx);
+uint16_t smp_cpu_addr(uint16_t idx);
+bool smp_cpu_stopped(uint16_t idx);
+bool smp_sense_running_status(uint16_t idx);
+int smp_cpu_restart(uint16_t idx);
+int smp_cpu_start(uint16_t idx, struct psw psw);
+int smp_cpu_stop(uint16_t idx);
+int smp_cpu_stop_store_status(uint16_t idx);
+int smp_cpu_destroy(uint16_t idx);
+int smp_cpu_setup(uint16_t idx, struct psw psw);
 void smp_teardown(void);
 void smp_setup(void);
+int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
+int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status);
 
 #endif
diff --git a/lib/s390x/smp.c b/lib/s390x/smp.c
index eae742d2..dde79274 100644
--- a/lib/s390x/smp.c
+++ b/lib/s390x/smp.c
@@ -29,11 +29,28 @@  static struct spinlock lock;
 
 extern void smp_cpu_setup_state(void);
 
+static void check_idx(uint16_t idx)
+{
+	assert(idx < smp_query_num_cpus());
+}
+
 int smp_query_num_cpus(void)
 {
 	return sclp_get_cpu_num();
 }
 
+int smp_sigp(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
+{
+	check_idx(idx);
+	return sigp(cpus[idx].addr, order, parm, status);
+}
+
+int smp_sigp_retry(uint16_t idx, uint8_t order, unsigned long parm, uint32_t *status)
+{
+	check_idx(idx);
+	return sigp_retry(cpus[idx].addr, order, parm, status);
+}
+
 struct cpu *smp_cpu_from_addr(uint16_t addr)
 {
 	int i, num = smp_query_num_cpus();
@@ -45,174 +62,183 @@  struct cpu *smp_cpu_from_addr(uint16_t addr)
 	return NULL;
 }
 
-bool smp_cpu_stopped(uint16_t addr)
+struct cpu *smp_cpu_from_idx(uint16_t idx)
+{
+	check_idx(idx);
+	return &cpus[idx];
+}
+
+uint16_t smp_cpu_addr(uint16_t idx)
+{
+	check_idx(idx);
+	return cpus[idx].addr;
+}
+
+bool smp_cpu_stopped(uint16_t idx)
 {
 	uint32_t status;
 
-	if (sigp(addr, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
+	if (smp_sigp(idx, SIGP_SENSE, 0, &status) != SIGP_CC_STATUS_STORED)
 		return false;
 	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
 }
 
-bool smp_sense_running_status(uint16_t addr)
+bool smp_sense_running_status(uint16_t idx)
 {
-	if (sigp(addr, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
+	if (smp_sigp(idx, SIGP_SENSE_RUNNING, 0, NULL) != SIGP_CC_STATUS_STORED)
 		return true;
 	/* Status stored condition code is equivalent to cpu not running. */
 	return false;
 }
 
-static int smp_cpu_stop_nolock(uint16_t addr, bool store)
+static int smp_cpu_stop_nolock(uint16_t idx, bool store)
 {
-	struct cpu *cpu;
 	uint8_t order = store ? SIGP_STOP_AND_STORE_STATUS : SIGP_STOP;
 
-	cpu = smp_cpu_from_addr(addr);
-	if (!cpu || addr == cpus[0].addr)
+	/* refuse to work on the boot CPU */
+	if (idx == 0)
 		return -1;
 
-	if (sigp_retry(addr, order, 0, NULL))
+	if (smp_sigp_retry(idx, order, 0, NULL))
 		return -1;
 
-	while (!smp_cpu_stopped(addr))
+	while (!smp_cpu_stopped(idx))
 		mb();
-	cpu->active = false;
+	/* idx has been already checked by the smp_* functions called above */
+	cpus[idx].active = false;
 	return 0;
 }
 
-int smp_cpu_stop(uint16_t addr)
+int smp_cpu_stop(uint16_t idx)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_stop_nolock(addr, false);
+	rc = smp_cpu_stop_nolock(idx, false);
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_stop_store_status(uint16_t addr)
+int smp_cpu_stop_store_status(uint16_t idx)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_stop_nolock(addr, true);
+	rc = smp_cpu_stop_nolock(idx, true);
 	spin_unlock(&lock);
 	return rc;
 }
 
-static int smp_cpu_restart_nolock(uint16_t addr, struct psw *psw)
+static int smp_cpu_restart_nolock(uint16_t idx, struct psw *psw)
 {
 	int rc;
-	struct cpu *cpu = smp_cpu_from_addr(addr);
 
-	if (!cpu)
-		return -1;
+	check_idx(idx);
 	if (psw) {
-		cpu->lowcore->restart_new_psw.mask = psw->mask;
-		cpu->lowcore->restart_new_psw.addr = psw->addr;
+		cpus[idx].lowcore->restart_new_psw.mask = psw->mask;
+		cpus[idx].lowcore->restart_new_psw.addr = psw->addr;
 	}
 	/*
 	 * Stop the cpu, so we don't have a race between a running cpu
 	 * and the restart in the test that checks if the cpu is
 	 * running after the restart.
 	 */
-	smp_cpu_stop_nolock(addr, false);
-	rc = sigp(addr, SIGP_RESTART, 0, NULL);
+	smp_cpu_stop_nolock(idx, false);
+	rc = sigp(cpus[idx].addr, SIGP_RESTART, 0, NULL);
 	if (rc)
 		return rc;
 	/*
 	 * The order has been accepted, but the actual restart may not
 	 * have been performed yet, so wait until the cpu is running.
 	 */
-	while (smp_cpu_stopped(addr))
+	while (smp_cpu_stopped(idx))
 		mb();
-	cpu->active = true;
+	cpus[idx].active = true;
 	return 0;
 }
 
-int smp_cpu_restart(uint16_t addr)
+int smp_cpu_restart(uint16_t idx)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_restart_nolock(addr, NULL);
+	rc = smp_cpu_restart_nolock(idx, NULL);
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_start(uint16_t addr, struct psw psw)
+int smp_cpu_start(uint16_t idx, struct psw psw)
 {
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_restart_nolock(addr, &psw);
+	rc = smp_cpu_restart_nolock(idx, &psw);
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_destroy(uint16_t addr)
+int smp_cpu_destroy(uint16_t idx)
 {
-	struct cpu *cpu;
 	int rc;
 
 	spin_lock(&lock);
-	rc = smp_cpu_stop_nolock(addr, false);
+	rc = smp_cpu_stop_nolock(idx, false);
 	if (!rc) {
-		cpu = smp_cpu_from_addr(addr);
-		free_pages(cpu->lowcore);
-		free_pages(cpu->stack);
-		cpu->lowcore = (void *)-1UL;
-		cpu->stack = (void *)-1UL;
+		free_pages(cpus[idx].lowcore);
+		free_pages(cpus[idx].stack);
+		cpus[idx].lowcore = (void *)-1UL;
+		cpus[idx].stack = (void *)-1UL;
 	}
 	spin_unlock(&lock);
 	return rc;
 }
 
-int smp_cpu_setup(uint16_t addr, struct psw psw)
+static int smp_cpu_setup_nolock(uint16_t idx, struct psw psw)
 {
 	struct lowcore *lc;
-	struct cpu *cpu;
-	int rc = -1;
-
-	spin_lock(&lock);
-
-	if (!cpus)
-		goto out;
 
-	cpu = smp_cpu_from_addr(addr);
-
-	if (!cpu || cpu->active)
-		goto out;
+	if (cpus[idx].active)
+		return -1;
 
-	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);
+	sigp_retry(cpus[idx].addr, SIGP_INITIAL_CPU_RESET, 0, NULL);
 
 	lc = alloc_pages_flags(1, AREA_DMA31);
-	cpu->lowcore = lc;
-	memset(lc, 0, PAGE_SIZE * 2);
-	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
+	cpus[idx].lowcore = lc;
+	sigp_retry(cpus[idx].addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
 
 	/* Copy all exception psws. */
 	memcpy(lc, cpus[0].lowcore, 512);
 
 	/* Setup stack */
-	cpu->stack = (uint64_t *)alloc_pages(2);
+	cpus[idx].stack = (uint64_t *)alloc_pages(2);
 
 	/* Start without DAT and any other mask bits. */
-	cpu->lowcore->sw_int_psw.mask = psw.mask;
-	cpu->lowcore->sw_int_psw.addr = psw.addr;
-	cpu->lowcore->sw_int_grs[14] = psw.addr;
-	cpu->lowcore->sw_int_grs[15] = (uint64_t)cpu->stack + (PAGE_SIZE * 4);
+	lc->sw_int_psw.mask = psw.mask;
+	lc->sw_int_psw.addr = psw.addr;
+	lc->sw_int_grs[14] = psw.addr;
+	lc->sw_int_grs[15] = (uint64_t)cpus[idx].stack + (PAGE_SIZE * 4);
 	lc->restart_new_psw.mask = PSW_MASK_64;
 	lc->restart_new_psw.addr = (uint64_t)smp_cpu_setup_state;
 	lc->sw_int_crs[0] = BIT_ULL(CTL0_AFP);
 
 	/* Start processing */
-	smp_cpu_restart_nolock(addr, NULL);
+	smp_cpu_restart_nolock(idx, NULL);
 	/* Wait until the cpu has finished setup and started the provided psw */
 	while (lc->restart_new_psw.addr != psw.addr)
 		mb();
-	rc = 0;
-out:
+
+	return 0;
+}
+
+int smp_cpu_setup(uint16_t idx, struct psw psw)
+{
+	int rc = -1;
+
+	spin_lock(&lock);
+	if (cpus) {
+		check_idx(idx);
+		rc = smp_cpu_setup_nolock(idx, psw);
+	}
 	spin_unlock(&lock);
 	return rc;
 }