--- a/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
@@ -76,14 +76,12 @@ check_sm_in:
tbz x7, #SVCR_SM_SHIFT, check_sve_in
// Load FFR if we have FA64
- mov x4, #0
- tbz x0, #HAVE_FA64_SHIFT, load_sve
- mov x4, #1
+ ubfx x4, x0, #HAVE_FA64_SHIFT, #1
b load_sve
// SVE?
check_sve_in:
- tbz x0, #HAVE_SVE_SHIFT, wait_for_writes
+ tbz x0, #HAVE_SVE_SHIFT, check_fpmr_in
mov x4, #1
load_sve:
@@ -148,6 +146,13 @@ load_sve:
ldr p14, [x7, #14, MUL VL]
ldr p15, [x7, #15, MUL VL]
+ // This has to come after we set PSTATE.SM
+check_fpmr_in:
+ tbz x0, #HAVE_FPMR_SHIFT, wait_for_writes
+ adrp x7, fpmr_in
+ ldr x7, [x7, :lo12:fpmr_in]
+	msr REG_FPMR, x7
+
wait_for_writes:
// Wait for the parent
brk #0
@@ -171,6 +176,12 @@ wait_for_writes:
stp q28, q29, [x7, #16 * 28]
stp q30, q31, [x7, #16 * 30]
+ tbz x0, #HAVE_FPMR_SHIFT, check_sme_out
+ mrs x7, REG_FPMR
+ adrp x6, fpmr_out
+ str x7, [x6, :lo12:fpmr_out]
+
+check_sme_out:
tbz x0, #HAVE_SME_SHIFT, check_sve_out
rdsvl 11, 1
@@ -202,9 +213,7 @@ check_sm_out:
tbz x7, #SVCR_SM_SHIFT, check_sve_out
// Do we have FA64 and FFR?
- mov x4, #0
- tbz x0, #HAVE_FA64_SHIFT, read_sve
- mov x4, #1
+ ubfx x4, x0, #HAVE_FA64_SHIFT, #1
b read_sve
// SVE?
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -50,11 +50,22 @@
#define NT_ARM_ZT 0x40d
#endif
+#ifndef NT_ARM_FPMR
+#define NT_ARM_FPMR 0x40e
+#endif
+
#define ARCH_VQ_MAX 256
/* VL 128..2048 in powers of 2 */
#define MAX_NUM_VLS 5
+/*
+ * FPMR bits we can set without doing feature checks to see if values
+ * are valid.
+ */
+#define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \
+ FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK)
+
#define NUM_FPR 32
__uint128_t v_in[NUM_FPR];
__uint128_t v_expected[NUM_FPR];
@@ -80,6 +91,8 @@ char zt_in[ZT_SIG_REG_BYTES];
char zt_expected[ZT_SIG_REG_BYTES];
char zt_out[ZT_SIG_REG_BYTES];
+uint64_t fpmr_in, fpmr_expected, fpmr_out;
+
uint64_t sve_vl_out;
uint64_t sme_vl_out;
uint64_t svcr_in, svcr_expected, svcr_out;
@@ -130,6 +143,11 @@ static bool fa64_supported(void)
return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64;
}
+static bool fpmr_supported(void)
+{
+ return getauxval(AT_HWCAP2) & HWCAP2_FPMR;
+}
+
static bool compare_buffer(const char *name, void *out,
void *expected, size_t size)
{
@@ -235,6 +253,8 @@ static void run_child(struct test_config *config)
flags |= HAVE_SME2;
if (fa64_supported())
flags |= HAVE_FA64;
+ if (fpmr_supported())
+ flags |= HAVE_FPMR;
load_and_save(flags);
@@ -323,6 +343,14 @@ static void read_child_regs(pid_t child)
iov_child.iov_len = sizeof(zt_out);
read_one_child_regs(child, "ZT", &iov_parent, &iov_child);
}
+
+ if (fpmr_supported()) {
+ iov_parent.iov_base = &fpmr_out;
+ iov_parent.iov_len = sizeof(fpmr_out);
+ iov_child.iov_base = &fpmr_out;
+ iov_child.iov_len = sizeof(fpmr_out);
+ read_one_child_regs(child, "FPMR", &iov_parent, &iov_child);
+ }
}
static bool continue_breakpoint(pid_t child,
@@ -597,6 +625,26 @@ static bool check_ptrace_values_zt(pid_t child, struct test_config *config)
return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES);
}
+static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config)
+{
+ uint64_t val;
+ struct iovec iov;
+ int ret;
+
+ if (!fpmr_supported())
+ return true;
+
+ iov.iov_base = &val;
+ iov.iov_len = sizeof(val);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov);
+ if (ret != 0) {
+ ksft_print_msg("Failed to read initial FPMR: %s (%d)\n",
+ strerror(errno), errno);
+ return false;
+ }
+
+ return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val));
+}
static bool check_ptrace_values(pid_t child, struct test_config *config)
{
@@ -631,6 +679,9 @@ static bool check_ptrace_values(pid_t child, struct test_config *config)
if (!check_ptrace_values_zt(child, config))
pass = false;
+ if (!check_ptrace_values_fpmr(child, config))
+ pass = false;
+
return pass;
}
@@ -834,11 +885,18 @@ static void set_initial_values(struct test_config *config)
{
int vq = __sve_vq_from_vl(vl_in(config));
int sme_vq = __sve_vq_from_vl(config->sme_vl_in);
+ bool sm_change;
svcr_in = config->svcr_in;
svcr_expected = config->svcr_expected;
svcr_out = 0;
+ if (sme_supported() &&
+ (svcr_in & SVCR_SM) != (svcr_expected & SVCR_SM))
+ sm_change = true;
+ else
+ sm_change = false;
+
fill_random(&v_in, sizeof(v_in));
memcpy(v_expected, v_in, sizeof(v_in));
memset(v_out, 0, sizeof(v_out));
@@ -885,6 +943,21 @@ static void set_initial_values(struct test_config *config)
memset(zt_expected, 0, ZT_SIG_REG_BYTES);
memset(zt_out, 0, sizeof(zt_out));
}
+
+ if (fpmr_supported()) {
+ fill_random(&fpmr_in, sizeof(fpmr_in));
+ fpmr_in &= FPMR_SAFE_BITS;
+
+ /* Entering or exiting streaming mode clears FPMR */
+ if (sm_change)
+ fpmr_expected = 0;
+ else
+ fpmr_expected = fpmr_in;
+ } else {
+ fpmr_in = 0;
+ fpmr_expected = 0;
+ fpmr_out = 0;
+ }
}
static bool check_memory_values(struct test_config *config)
@@ -935,6 +1008,12 @@ static bool check_memory_values(struct test_config *config)
if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES))
pass = false;
+ if (fpmr_out != fpmr_expected) {
+ ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n",
+ fpmr_out, fpmr_expected);
+ pass = false;
+ }
+
return pass;
}
@@ -1012,6 +1091,36 @@ static void fpsimd_write(pid_t child, struct test_config *test_config)
strerror(errno), errno);
}
+static bool fpmr_write_supported(struct test_config *config)
+{
+ if (!fpmr_supported())
+ return false;
+
+ if (!sve_sme_same(config))
+ return false;
+
+ return true;
+}
+
+static void fpmr_write_expected(struct test_config *config)
+{
+ fill_random(&fpmr_expected, sizeof(fpmr_expected));
+ fpmr_expected &= FPMR_SAFE_BITS;
+}
+
+static void fpmr_write(pid_t child, struct test_config *config)
+{
+ struct iovec iov;
+ int ret;
+
+ iov.iov_len = sizeof(fpmr_expected);
+ iov.iov_base = &fpmr_expected;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov);
+ if (ret != 0)
+ ksft_print_msg("Failed to write FPMR: %s (%d)\n",
+ strerror(errno), errno);
+}
+
static void sve_write_expected(struct test_config *config)
{
int vl = vl_expected(config);
@@ -1268,6 +1377,12 @@ static struct test_definition base_test_defs[] = {
.set_expected_values = fpsimd_write_expected,
.modify_values = fpsimd_write,
},
+ {
+ .name = "FPMR write",
+ .supported = fpmr_write_supported,
+ .set_expected_values = fpmr_write_expected,
+ .modify_values = fpmr_write,
+ },
};
static struct test_definition sve_test_defs[] = {
@@ -1477,6 +1592,9 @@ int main(void)
if (fa64_supported())
ksft_print_msg("FA64 supported\n");
+ if (fpmr_supported())
+ ksft_print_msg("FPMR supported\n");
+
ksft_set_plan(tests);
/* Get signal handers ready before we start any children */
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.h
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.h
@@ -10,10 +10,12 @@
#define HAVE_SME_SHIFT 1
#define HAVE_SME2_SHIFT 2
#define HAVE_FA64_SHIFT 3
+#define HAVE_FPMR_SHIFT 4
#define HAVE_SVE (1 << HAVE_SVE_SHIFT)
#define HAVE_SME (1 << HAVE_SME_SHIFT)
#define HAVE_SME2 (1 << HAVE_SME2_SHIFT)
#define HAVE_FA64 (1 << HAVE_FA64_SHIFT)
+#define HAVE_FPMR (1 << HAVE_FPMR_SHIFT)
#endif

Add coverage for FPMR to fp-ptrace. FPMR can be available independently
of SVE and SME; if SME is supported then FPMR is cleared by entering or
exiting streaming mode.

As with other registers we generate random values to load into the
register, restricting them to bitfields which are always defined.
Bitfields whose valid values depend on the set of supported FP8 formats
are left at zero to reduce complexity; it is unlikely that any ptrace
issues would be specific to particular bitfields.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 tools/testing/selftests/arm64/fp/fp-ptrace-asm.S |  23 +++--
 tools/testing/selftests/arm64/fp/fp-ptrace.c     | 118 +++++++++++++++++++++++
 tools/testing/selftests/arm64/fp/fp-ptrace.h     |   2 +
 3 files changed, 136 insertions(+), 7 deletions(-)
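
For reviewers less familiar with the regset interface, below is a minimal
sketch (not part of the patch; the helper name fpmr_rmw is made up for
illustration, and it assumes the tracee is already attached and stopped) of
how a tracer drives the new NT_ARM_FPMR regset with PTRACE_GETREGSET and
PTRACE_SETREGSET, the same calls fpmr_write() and check_ptrace_values_fpmr()
above exercise:

	/*
	 * Illustrative sketch only, not part of the patch: read-modify-write
	 * a stopped tracee's FPMR through the NT_ARM_FPMR regset.
	 */
	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	#ifndef NT_ARM_FPMR
	#define NT_ARM_FPMR 0x40e
	#endif

	/* Hypothetical helper: OR set_bits into the tracee's FPMR */
	static int fpmr_rmw(pid_t child, uint64_t set_bits)
	{
		uint64_t fpmr;
		struct iovec iov = {
			.iov_base = &fpmr,
			.iov_len = sizeof(fpmr),
		};

		/* The child must be in a ptrace stop for these to succeed */
		if (ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov) != 0) {
			fprintf(stderr, "GETREGSET(FPMR): %s\n", strerror(errno));
			return -1;
		}

		fpmr |= set_bits;

		if (ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov) != 0) {
			fprintf(stderr, "SETREGSET(FPMR): %s\n", strerror(errno));
			return -1;
		}

		return 0;
	}

As in the test, only bits that are valid regardless of the supported FP8
formats should be written blindly; anything else needs a feature check first.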
Add coverage for FPMR to fp-ptrace. FPMR can be available independently of SVE and SME, if SME is supported then FPMR is cleared by entering and exiting streaming mode. As with other registers we generate random values to load into the register, we restrict these to bitfields which are always defined. We also leave bitfields where the valid values are affected by the set of supported FP8 formats zero to reduce complexity, it is unlikely that specific bitfields will be affected by ptrace issues. Signed-off-by: Mark Brown <broonie@kernel.org> --- tools/testing/selftests/arm64/fp/fp-ptrace-asm.S | 23 +++-- tools/testing/selftests/arm64/fp/fp-ptrace.c | 118 +++++++++++++++++++++++ tools/testing/selftests/arm64/fp/fp-ptrace.h | 2 + 3 files changed, 136 insertions(+), 7 deletions(-)