[kvm-unit-tests,v3,3/3] s390x: smp: add tests for calls in wait state

Message ID 20220725155420.2009109-4-nrb@linux.ibm.com (mailing list archive)
State Superseded, archived
Series s390x: add tests for SIGP call orders in enabled wait

Commit Message

Nico Boehr July 25, 2022, 3:54 p.m. UTC
When the SIGP interpretation facility is in use, a SIGP external call to
a waiting CPU results in an exit of the calling CPU. For non-PV guests
it is a code 56 (partial execution) exit, otherwise it is a code 108
(secure instruction notification) exit. These exits are handled
differently from a normal SIGP instruction intercept, which happens
without interpretation, and hence need to be tested.

Signed-off-by: Nico Boehr <nrb@linux.ibm.com>
---
 s390x/smp.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)
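
For background (not part of this patch): the decimal exit codes in the
commit message are SIE intercept codes as seen by KVM. A minimal sketch
of the correspondence, assuming the ICPT_* names used by Linux's s390
KVM code (the macro names are an assumption here, not taken from this
series):

	/* Sketch only: commit-message exit codes written as hex intercept codes */
	#define ICPT_PARTEXEC	0x38	/* 56: partial execution (non-PV guest) */
	#define ICPT_PV_NOTIFY	0x6c	/* 108: secure instruction notification (PV guest) */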

Comments

Claudio Imbrenda Aug. 2, 2022, 4:06 p.m. UTC | #1
On Mon, 25 Jul 2022 17:54:20 +0200
Nico Boehr <nrb@linux.ibm.com> wrote:

> When the SIGP interpretation facility is in use, a SIGP external call to
> a waiting CPU results in an exit of the calling CPU. For non-PV guests
> it is a code 56 (partial execution) exit, otherwise it is a code 108
> (secure instruction notification) exit. These exits are handled
> differently from a normal SIGP instruction intercept, which happens
> without interpretation, and hence need to be tested.
> 
> Signed-off-by: Nico Boehr <nrb@linux.ibm.com>
> ---
>  s390x/smp.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 78 insertions(+)
> 
> diff --git a/s390x/smp.c b/s390x/smp.c
> index 12c40cadaed2..d59ca38e7a37 100644
> --- a/s390x/smp.c
> +++ b/s390x/smp.c
> @@ -356,6 +356,83 @@ static void test_calls(void)
>  	}
>  }
>  
> +static void call_in_wait_ext_int_fixup(struct stack_frame_int *stack)
> +{
> +	/* Clear wait bit so we don't immediately wait again after the fixup */
> +	lowcore.ext_old_psw.mask &= ~PSW_MASK_WAIT;
> +
> +	stack->crs[0] &= ~BIT(current_sigp_call_case->cr0_bit);
> +}
> +
> +static void call_in_wait_setup(void)
> +{
> +	expect_ext_int();
> +	ctl_set_bit(0, current_sigp_call_case->cr0_bit);
> +	register_ext_cleanup_func(call_in_wait_ext_int_fixup);
> +
> +	set_flag(1);
> +}
> +
> +static void call_in_wait_received(void)
> +{
> +	report(lowcore.ext_int_code == current_sigp_call_case->ext_int_expected_type, "received");
> +	register_ext_cleanup_func(NULL);
> +
> +	set_flag(1);
> +}
> +
> +static void test_calls_in_wait(void)
> +{
> +	int i;
> +	struct psw psw;
> +
> +	report_prefix_push("psw wait");
> +	for (i = 0; i < ARRAY_SIZE(cases_sigp_call); i++) {
> +		current_sigp_call_case = &cases_sigp_call[i];
> +
> +		report_prefix_push(current_sigp_call_case->name);
> +		if (!current_sigp_call_case->supports_pv && uv_os_is_guest()) {
> +			report_skip("Not supported under PV");
> +			report_prefix_pop();
> +			continue;
> +		}
> +
> +		/* Let the secondary CPU set up the external mask and the external interrupt cleanup function */
> +		set_flag(0);
> +		psw.mask = extract_psw_mask();
> +		psw.addr = (unsigned long)call_in_wait_setup;
> +		smp_cpu_start(1, psw);
> +
> +		/* Wait until the receiver has finished setup */
> +		wait_for_flag();
> +		set_flag(0);
> +
> +		/*
> +		 * To avoid races, we need to know that the secondary CPU has entered wait,
> +		 * but the architecture provides no way to check whether the secondary CPU
> +		 * is in wait.
> +		 *
> +		 * But since a waiting CPU is considered operating, simply stop the CPU, set
> +		 * up the restart new PSW mask in wait, send the restart interrupt and then
> +		 * wait until the CPU becomes operating (done by smp_cpu_start).
> +		 */
> +		smp_cpu_stop(1);
> +		expect_ext_int();

which external interrupt are you expecting on this CPU?

> +		psw.mask = extract_psw_mask() | PSW_MASK_EXT | PSW_MASK_WAIT;
> +		psw.addr = (unsigned long)call_in_wait_received;
> +		smp_cpu_start(1, psw);
> +
> +		smp_sigp(1, current_sigp_call_case->call, 0, NULL);
> +
> +		/* Wait until the receiver has handled the call */
> +		wait_for_flag();
> +		smp_cpu_stop(1);
> +
> +		report_prefix_pop();
> +	}
> +	report_prefix_pop();
> +}
> +
>  static void test_sense_running(void)
>  {
>  	report_prefix_push("sense_running");
> @@ -474,6 +551,7 @@ int main(void)
>  	test_store_status();
>  	test_set_prefix();
>  	test_calls();
> +	test_calls_in_wait();
>  	test_sense_running();
>  	test_reset();
>  	test_reset_initial();
Nico Boehr Aug. 8, 2022, 3:35 p.m. UTC | #2
Quoting Claudio Imbrenda (2022-08-02 18:06:05)
[...]
> > diff --git a/s390x/smp.c b/s390x/smp.c
> > index 12c40cadaed2..d59ca38e7a37 100644
> > --- a/s390x/smp.c
> > +++ b/s390x/smp.c
[...]
> > +static void test_calls_in_wait(void)
> > +{
[...]
> > +             /*
> > +              * To avoid races, we need to know that the secondary CPU has entered wait,
> > +              * but the architecture provides no way to check whether the secondary CPU
> > +              * is in wait.
> > +              *
> > +              * But since a waiting CPU is considered operating, simply stop the CPU, set
> > +              * up the restart new PSW mask in wait, send the restart interrupt and then
> > +              * wait until the CPU becomes operating (done by smp_cpu_start).
> > +              */
> > +             smp_cpu_stop(1);
> > +             expect_ext_int();
> 
> which external interrupt are you expecting on this CPU?

Right, leftover code, can be removed. Will be fixed.
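
For illustration only (not the actual v4 change): with that leftover
expect_ext_int() dropped, the sequence that restarts the secondary CPU
into enabled wait would presumably boil down to:

	smp_cpu_stop(1);
	psw.mask = extract_psw_mask() | PSW_MASK_EXT | PSW_MASK_WAIT;
	psw.addr = (unsigned long)call_in_wait_received;
	smp_cpu_start(1, psw);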

Patch

diff --git a/s390x/smp.c b/s390x/smp.c
index 12c40cadaed2..d59ca38e7a37 100644
--- a/s390x/smp.c
+++ b/s390x/smp.c
@@ -356,6 +356,83 @@ static void test_calls(void)
 	}
 }
 
+static void call_in_wait_ext_int_fixup(struct stack_frame_int *stack)
+{
+	/* Clear wait bit so we don't immediately wait again after the fixup */
+	lowcore.ext_old_psw.mask &= ~PSW_MASK_WAIT;
+
+	stack->crs[0] &= ~BIT(current_sigp_call_case->cr0_bit);
+}
+
+static void call_in_wait_setup(void)
+{
+	expect_ext_int();
+	ctl_set_bit(0, current_sigp_call_case->cr0_bit);
+	register_ext_cleanup_func(call_in_wait_ext_int_fixup);
+
+	set_flag(1);
+}
+
+static void call_in_wait_received(void)
+{
+	report(lowcore.ext_int_code == current_sigp_call_case->ext_int_expected_type, "received");
+	register_ext_cleanup_func(NULL);
+
+	set_flag(1);
+}
+
+static void test_calls_in_wait(void)
+{
+	int i;
+	struct psw psw;
+
+	report_prefix_push("psw wait");
+	for (i = 0; i < ARRAY_SIZE(cases_sigp_call); i++) {
+		current_sigp_call_case = &cases_sigp_call[i];
+
+		report_prefix_push(current_sigp_call_case->name);
+		if (!current_sigp_call_case->supports_pv && uv_os_is_guest()) {
+			report_skip("Not supported under PV");
+			report_prefix_pop();
+			continue;
+		}
+
+		/* Let the secondary CPU set up the external mask and the external interrupt cleanup function */
+		set_flag(0);
+		psw.mask = extract_psw_mask();
+		psw.addr = (unsigned long)call_in_wait_setup;
+		smp_cpu_start(1, psw);
+
+		/* Wait until the receiver has finished setup */
+		wait_for_flag();
+		set_flag(0);
+
+		/*
+		 * To avoid races, we need to know that the secondary CPU has entered wait,
+		 * but the architecture provides no way to check whether the secondary CPU
+		 * is in wait.
+		 *
+		 * But since a waiting CPU is considered operating, simply stop the CPU, set
+		 * up the restart new PSW mask in wait, send the restart interrupt and then
+		 * wait until the CPU becomes operating (done by smp_cpu_start).
+		 */
+		smp_cpu_stop(1);
+		expect_ext_int();
+		psw.mask = extract_psw_mask() | PSW_MASK_EXT | PSW_MASK_WAIT;
+		psw.addr = (unsigned long)call_in_wait_received;
+		smp_cpu_start(1, psw);
+
+		smp_sigp(1, current_sigp_call_case->call, 0, NULL);
+
+		/* Wait until the receiver has handled the call */
+		wait_for_flag();
+		smp_cpu_stop(1);
+
+		report_prefix_pop();
+	}
+	report_prefix_pop();
+}
+
 static void test_sense_running(void)
 {
 	report_prefix_push("sense_running");
@@ -474,6 +551,7 @@ int main(void)
 	test_store_status();
 	test_set_prefix();
 	test_calls();
+	test_calls_in_wait();
 	test_sense_running();
 	test_reset();
 	test_reset_initial();
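
For context when reading test_calls_in_wait(): cases_sigp_call and
current_sigp_call_case are introduced by an earlier patch of this series
and are not shown on this page. The sketch below is reconstructed purely
from the fields referenced above; field types and table contents are
assumptions, the real definition is in patch 1/3 of the series.

	/* Hedged sketch only; see patch 1/3 for the actual definition. */
	struct sigp_call_case {
		const char *name;		/* report prefix for the test case */
		int call;			/* SIGP order sent via smp_sigp() */
		uint16_t ext_int_expected_type;	/* expected external interrupt code */
		unsigned int cr0_bit;		/* CR0 subclass-mask bit to enable */
		bool supports_pv;		/* whether the order is exercised under PV */
	};
	static const struct sigp_call_case cases_sigp_call[] = {
		/* one entry per SIGP call order under test */
	};
	static const struct sigp_call_case *current_sigp_call_case;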