
[v3] target/xtensa: clean up unaligned access

Message ID 20210517205259.13241-1-jcmvbkbc@gmail.com
State New, archived

Commit Message

Max Filippov May 17, 2021, 8:52 p.m. UTC
Xtensa cores may or may not have hardware support for unaligned memory
access. On cores with such support, pass MO_UNALN in the memory access
flags for all operations that would not raise an exception. Drop the
condition from xtensa_cpu_do_unaligned_access and replace it with
assertions. Add a test.
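
The resulting translator pattern, condensed from the patch below:

    al = gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL | al);

The helper returns MO_UNALN on cores with hardware alignment support
and MO_ALIGN otherwise, rather than emitting an explicit alignment
check into the translated code.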

Suggested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
---
Changes v2->v3:
- drop assertion for !XTENSA_OPTION_HW_ALIGNMENT from
  xtensa_cpu_do_unaligned_access to correctly handle acquire/release
  instructions;
- add tests for acquire/release instructions.

Changes v1->v2:
- correctly handle the case of !XCHAL_UNALIGNED_*_EXCEPTION in the test

 target/xtensa/helper.c             |  13 +-
 target/xtensa/translate.c          | 108 +++++++-------
 tests/tcg/xtensa/test_load_store.S | 221 +++++++++++++++++++++++++++++
 3 files changed, 281 insertions(+), 61 deletions(-)
 create mode 100644 tests/tcg/xtensa/test_load_store.S

Comments

Richard Henderson May 18, 2021, 8:11 p.m. UTC | #1
On 5/17/21 3:52 PM, Max Filippov wrote:
> -static void gen_load_store_alignment(DisasContext *dc, int shift,
> -        TCGv_i32 addr, bool no_hw_alignment)
> +static MemOp gen_load_store_alignment(DisasContext *dc, int shift,
> +                                      TCGv_i32 addr, bool no_hw_alignment)
>   {
>       if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
>           tcg_gen_andi_i32(addr, addr, ~0 << shift);
> -    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
> -            no_hw_alignment) {
> -        TCGLabel *label = gen_new_label();
> -        TCGv_i32 tmp = tcg_temp_new_i32();
> -        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
> -        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
> -        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
> -        gen_set_label(label);
> -        tcg_temp_free(tmp);
> +    }
> +    if (!no_hw_alignment && option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT)) {
> +        return MO_UNALN;
> +    } else {
> +        return MO_ALIGN;
>       }
>   }
>   
> @@ -1784,10 +1770,11 @@ static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
>                              const uint32_t par[])
>   {
>       TCGv_i32 addr = tcg_temp_new_i32();
> +    MemOp al;
>   
>       tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
> -    gen_load_store_alignment(dc, 2, addr, false);
> -    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
> +    al = gen_load_store_alignment(dc, 2, addr, false);
> +    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL | al);

You're duplicating the information about the size of the alignment.

I think it would be better to pass the partial MemOp into
gen_load_store_alignment and return the complete MemOp.  E.g.:

MemOp gen_load_store_alignment(DisasContext *dc, MemOp mop, TCGv addr)
{
    if ((mop & MO_SIZE) == MO_8) {
        return mop;
    }
    if ((mop & MO_AMASK) == 0 &&
        !option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT)) {
        mop |= MO_ALIGN;
    }

    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
        tcg_gen_andi_i32(addr, addr, ~0 << get_alignment_bits(mop));
    }
    return mop;
}

Then used as:

    mop = gen_load_store_alignment(dc, MO_TEUL, addr);
    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, mop);

    mop = gen_load_store_alignment(dc, MO_TEUL | MO_ALIGN, addr);
    gen_check_exclusive(dc, addr, false);
    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, mop);

This organization does require that you remove TARGET_ALIGNED_ONLY=y from
default-configs/targets/xtensa*.mak so that MO_ALIGN has a non-zero value.
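
For context, the MemOp alignment flags in include/exec/memop.h were
defined roughly like this at the time (a sketch from memory; the exact
context in the header may differ):

    /* With TARGET_ALIGNED_ONLY defined, MO_ALIGN is the all-zero
     * default, so ORing it into a MemOp has no effect; dropping the
     * define flips the two values, giving MO_ALIGN a non-zero
     * encoding that the slow path can act on. */
    #ifdef TARGET_ALIGNED_ONLY
        MO_ALIGN = 0,
        MO_UNALN = MO_AMASK,
    #else
        MO_ALIGN = MO_AMASK,
        MO_UNALN = 0,
    #endif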


r~
Max Filippov May 19, 2021, 12:26 a.m. UTC | #2
Hi Richard,

On Tue, May 18, 2021 at 1:11 PM Richard Henderson
<richard.henderson@linaro.org> wrote:
> On 5/17/21 3:52 PM, Max Filippov wrote:
> > @@ -1784,10 +1770,11 @@ static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
> >                              const uint32_t par[])
> >   {
> >       TCGv_i32 addr = tcg_temp_new_i32();
> > +    MemOp al;
> >
> >       tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
> > -    gen_load_store_alignment(dc, 2, addr, false);
> > -    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
> > +    al = gen_load_store_alignment(dc, 2, addr, false);
> > +    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL | al);
>
> You're duplicating the information about the size of the alignment.
>
> I think it would be better to pass the partial MemOp into
> get_load_store_alignment and return the complete MemOp.  E.g.:

That indeed looks better. Let me make another version of this patch.
Thanks for taking a look!

Patch

diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
index eeffee297d15..f18ab383fd89 100644
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -270,13 +270,12 @@  void xtensa_cpu_do_unaligned_access(CPUState *cs,
     XtensaCPU *cpu = XTENSA_CPU(cs);
     CPUXtensaState *env = &cpu->env;
 
-    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
-        !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
-        cpu_restore_state(CPU(cpu), retaddr, true);
-        HELPER(exception_cause_vaddr)(env,
-                                      env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
-                                      addr);
-    }
+    assert(xtensa_option_enabled(env->config,
+                                 XTENSA_OPTION_UNALIGNED_EXCEPTION));
+    cpu_restore_state(CPU(cpu), retaddr, true);
+    HELPER(exception_cause_vaddr)(env,
+                                  env->pc, LOAD_STORE_ALIGNMENT_CAUSE,
+                                  addr);
 }
 
 bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index 0ae4efc48a17..8759bea7ff85 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -339,16 +339,6 @@  static void gen_exception_cause(DisasContext *dc, uint32_t cause)
     }
 }
 
-static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
-        TCGv_i32 vaddr)
-{
-    TCGv_i32 tpc = tcg_const_i32(dc->pc);
-    TCGv_i32 tcause = tcg_const_i32(cause);
-    gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
-    tcg_temp_free(tpc);
-    tcg_temp_free(tcause);
-}
-
 static void gen_debug_exception(DisasContext *dc, uint32_t cause)
 {
     TCGv_i32 tpc = tcg_const_i32(dc->pc);
@@ -554,20 +544,16 @@  static uint32_t test_exceptions_hpi(DisasContext *dc, const OpcodeArg arg[],
     return test_exceptions_sr(dc, arg, par);
 }
 
-static void gen_load_store_alignment(DisasContext *dc, int shift,
-        TCGv_i32 addr, bool no_hw_alignment)
+static MemOp gen_load_store_alignment(DisasContext *dc, int shift,
+                                      TCGv_i32 addr, bool no_hw_alignment)
 {
     if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
         tcg_gen_andi_i32(addr, addr, ~0 << shift);
-    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
-            no_hw_alignment) {
-        TCGLabel *label = gen_new_label();
-        TCGv_i32 tmp = tcg_temp_new_i32();
-        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
-        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
-        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
-        gen_set_label(label);
-        tcg_temp_free(tmp);
+    }
+    if (!no_hw_alignment && option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT)) {
+        return MO_UNALN;
+    } else {
+        return MO_ALIGN;
     }
 }
 
@@ -1784,10 +1770,11 @@  static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp al;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, false);
-    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
+    al = gen_load_store_alignment(dc, 2, addr, false);
+    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL | al);
     tcg_temp_free(addr);
 }
 
@@ -1813,11 +1800,12 @@  static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp al;
 
     tcg_gen_mov_i32(addr, arg[1].in);
-    gen_load_store_alignment(dc, 2, addr, true);
+    al = gen_load_store_alignment(dc, 2, addr, true);
     gen_check_exclusive(dc, addr, false);
-    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL);
+    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL | al);
     tcg_gen_mov_i32(cpu_exclusive_addr, addr);
     tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
     tcg_temp_free(addr);
@@ -1827,18 +1815,19 @@  static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp al = MO_UNALN;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
     if (par[0] & MO_SIZE) {
-        gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
+        al = gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
     }
     if (par[2]) {
         if (par[1]) {
             tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
         }
-        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0]);
+        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0] | al);
     } else {
-        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0]);
+        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0] | al);
         if (par[1]) {
             tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
         }
@@ -1909,9 +1898,11 @@  static void translate_mac16(DisasContext *dc, const OpcodeArg arg[],
     TCGv_i32 mem32 = tcg_temp_new_i32();
 
     if (ld_offset) {
+        MemOp al;
+
         tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
-        gen_load_store_alignment(dc, 2, vaddr, false);
-        tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
+        al = gen_load_store_alignment(dc, 2, vaddr, false);
+        tcg_gen_qemu_ld_tl(mem32, vaddr, dc->cring, MO_TEUL | al);
     }
     if (op != MAC16_NONE) {
         TCGv_i32 m1 = gen_mac16_m(arg[off].in,
@@ -2357,13 +2348,14 @@  static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
 {
     TCGv_i32 tmp = tcg_temp_local_new_i32();
     TCGv_i32 addr = tcg_temp_local_new_i32();
+    MemOp al;
 
     tcg_gen_mov_i32(tmp, arg[0].in);
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, true);
+    al = gen_load_store_alignment(dc, 2, addr, true);
     gen_check_atomctl(dc, addr);
     tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
-                               tmp, dc->cring, MO_TEUL);
+                               tmp, dc->cring, MO_TEUL | al);
     tcg_temp_free(addr);
     tcg_temp_free(tmp);
 }
@@ -2372,10 +2364,11 @@  static void translate_s32e(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp al;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, false);
-    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL);
+    al = gen_load_store_alignment(dc, 2, addr, false);
+    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL | al);
     tcg_temp_free(addr);
 }
 
@@ -2386,14 +2379,15 @@  static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
     TCGv_i32 addr = tcg_temp_local_new_i32();
     TCGv_i32 res = tcg_temp_local_new_i32();
     TCGLabel *label = gen_new_label();
+    MemOp al;
 
     tcg_gen_movi_i32(res, 0);
     tcg_gen_mov_i32(addr, arg[1].in);
-    gen_load_store_alignment(dc, 2, addr, true);
+    al = gen_load_store_alignment(dc, 2, addr, true);
     tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
     gen_check_exclusive(dc, addr, true);
     tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
-                               arg[0].in, dc->cring, MO_TEUL);
+                               arg[0].in, dc->cring, MO_TEUL | al);
     tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
     tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val,
                         prev, cpu_exclusive_val, prev, cpu_exclusive_val);
@@ -6642,13 +6636,14 @@  static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp al;
 
     tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
-    gen_load_store_alignment(dc, 2, addr, false);
+    al = gen_load_store_alignment(dc, 2, addr, false);
     if (par[0]) {
-        tcg_gen_qemu_st32(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, MO_TEUL | al);
     } else {
-        tcg_gen_qemu_ld32u(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, MO_TEUL | al);
     }
     if (par[1]) {
         tcg_gen_mov_i32(arg[1].out, addr);
@@ -6660,13 +6655,14 @@  static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
 {
     TCGv_i32 addr = tcg_temp_new_i32();
+    MemOp al;
 
     tcg_gen_add_i32(addr, arg[1].in, arg[2].in);
-    gen_load_store_alignment(dc, 2, addr, false);
+    al = gen_load_store_alignment(dc, 2, addr, false);
     if (par[0]) {
-        tcg_gen_qemu_st32(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, MO_TEUL | al);
     } else {
-        tcg_gen_qemu_ld32u(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, MO_TEUL | al);
     }
     if (par[1]) {
         tcg_gen_mov_i32(arg[1].out, addr);
@@ -7104,6 +7100,7 @@  static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
 {
     TCGv_i32 addr;
+    MemOp al;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7111,11 +7108,11 @@  static void translate_ldsti_d(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 3, addr, false);
+    al = gen_load_store_alignment(dc, 3, addr, false);
     if (par[0]) {
-        tcg_gen_qemu_st64(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, MO_TEQ | al);
     } else {
-        tcg_gen_qemu_ld64(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_i64(arg[0].out, addr, dc->cring, MO_TEQ | al);
     }
     if (par[2]) {
         if (par[1]) {
@@ -7134,6 +7131,7 @@  static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[],
 {
     TCGv_i32 addr;
     OpcodeArg arg32[1];
+    MemOp al;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7141,14 +7139,14 @@  static void translate_ldsti_s(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 2, addr, false);
+    al = gen_load_store_alignment(dc, 2, addr, false);
     if (par[0]) {
         get_f32_i1(arg, arg32, 0);
-        tcg_gen_qemu_st32(arg32[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg32[0].in, addr, dc->cring, MO_TEUL | al);
         put_f32_i1(arg, arg32, 0);
     } else {
         get_f32_o1(arg, arg32, 0);
-        tcg_gen_qemu_ld32u(arg32[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg32[0].out, addr, dc->cring, MO_TEUL | al);
         put_f32_o1(arg, arg32, 0);
     }
     if (par[2]) {
@@ -7167,6 +7165,7 @@  static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
 {
     TCGv_i32 addr;
+    MemOp al;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7174,11 +7173,11 @@  static void translate_ldstx_d(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 3, addr, false);
+    al = gen_load_store_alignment(dc, 3, addr, false);
     if (par[0]) {
-        tcg_gen_qemu_st64(arg[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, MO_TEQ | al);
     } else {
-        tcg_gen_qemu_ld64(arg[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_i64(arg[0].out, addr, dc->cring, MO_TEQ | al);
     }
     if (par[2]) {
         if (par[1]) {
@@ -7197,6 +7196,7 @@  static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[],
 {
     TCGv_i32 addr;
     OpcodeArg arg32[1];
+    MemOp al;
 
     if (par[1]) {
         addr = tcg_temp_new_i32();
@@ -7204,14 +7204,14 @@  static void translate_ldstx_s(DisasContext *dc, const OpcodeArg arg[],
     } else {
         addr = arg[1].in;
     }
-    gen_load_store_alignment(dc, 2, addr, false);
+    al = gen_load_store_alignment(dc, 2, addr, false);
     if (par[0]) {
         get_f32_i1(arg, arg32, 0);
-        tcg_gen_qemu_st32(arg32[0].in, addr, dc->cring);
+        tcg_gen_qemu_st_tl(arg32[0].in, addr, dc->cring, MO_TEUL | al);
         put_f32_i1(arg, arg32, 0);
     } else {
         get_f32_o1(arg, arg32, 0);
-        tcg_gen_qemu_ld32u(arg32[0].out, addr, dc->cring);
+        tcg_gen_qemu_ld_tl(arg32[0].out, addr, dc->cring, MO_TEUL | al);
         put_f32_o1(arg, arg32, 0);
     }
     if (par[2]) {
diff --git a/tests/tcg/xtensa/test_load_store.S b/tests/tcg/xtensa/test_load_store.S
new file mode 100644
index 000000000000..b339f40f1280
--- /dev/null
+++ b/tests/tcg/xtensa/test_load_store.S
@@ -0,0 +1,221 @@ 
+#include "macros.inc"
+
+test_suite load_store
+
+.macro load_ok_test op, type, data, value
+    .data
+    .align  4
+1:
+    \type \data
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi    a3, 1b
+    addi    a4, a4, 1
+    mov     a5, a4
+    \op     a5, a3, 0
+    movi    a6, \value
+    assert  eq, a5, a6
+.endm
+
+#if XCHAL_UNALIGNED_LOAD_EXCEPTION
+.macro load_unaligned_test will_trap, op, type, data, value
+    .data
+    .align  4
+    .byte   0
+1:
+    \type \data
+    .previous
+
+    reset_ps
+    .ifeq \will_trap
+    set_vector kernel, 0
+    .else
+    set_vector kernel, 2f
+    .endif
+    movi    a3, 1b
+    addi    a4, a4, 1
+    mov     a5, a4
+1:
+    \op     a5, a3, 0
+    .ifeq \will_trap
+    movi    a6, \value
+    assert  eq, a5, a6
+    .else
+    test_fail
+2:
+    rsr     a6, exccause
+    movi    a7, 9
+    assert  eq, a6, a7
+    rsr     a6, epc1
+    movi    a7, 1b
+    assert  eq, a6, a7
+    rsr     a6, excvaddr
+    assert  eq, a6, a3
+    assert  eq, a5, a4
+    .endif
+    reset_ps
+.endm
+#else
+.macro load_unaligned_test will_trap, op, type, data, value
+    .data
+    .align  4
+1:
+    \type \data
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi    a3, 1b + 1
+    addi    a4, a4, 1
+    mov     a5, a4
+    \op     a5, a3, 0
+    movi    a6, \value
+    assert  eq, a5, a6
+.endm
+#endif
+
+.macro store_ok_test op, type, value
+    .data
+    .align  4
+    .byte   0, 0, 0, 0x55
+1:
+    \type 0
+2:
+    .byte   0xaa
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi    a3, 1b
+    movi    a5, \value
+    \op     a5, a3, 0
+    movi    a3, 2b
+    l8ui    a5, a3, 0
+    movi    a6, 0xaa
+    assert  eq, a5, a6
+    movi    a3, 1b - 1
+    l8ui    a5, a3, 0
+    movi    a6, 0x55
+    assert  eq, a5, a6
+.endm
+
+#if XCHAL_UNALIGNED_STORE_EXCEPTION
+.macro store_unaligned_test will_trap, op, nop, type, value
+    .data
+    .align  4
+    .byte   0x55
+1:
+    \type   0
+2:
+    .byte   0xaa
+    .previous
+
+    reset_ps
+    .ifeq \will_trap
+    set_vector kernel, 0
+    .else
+    set_vector kernel, 4f
+    .endif
+    movi    a3, 1b
+    movi    a5, \value
+3:
+    \op     a5, a3, 0
+    .ifne \will_trap
+    test_fail
+4:
+    rsr     a6, exccause
+    movi    a7, 9
+    assert  eq, a6, a7
+    rsr     a6, epc1
+    movi    a7, 3b
+    assert  eq, a6, a7
+    rsr     a6, excvaddr
+    assert  eq, a6, a3
+    l8ui    a5, a3, 0
+    assert  eqi, a5, 0
+    .endif
+    reset_ps
+    movi    a3, 2b
+    l8ui    a5, a3, 0
+    movi    a6, 0xaa
+    assert  eq, a5, a6
+    movi    a3, 1b - 1
+    l8ui    a5, a3, 0
+    movi    a6, 0x55
+    assert  eq, a5, a6
+.endm
+#else
+.macro store_unaligned_test will_trap, sop, lop, type, value
+    .data
+    .align  4
+    .byte   0x55
+1:
+    \type   0
+    .previous
+
+    reset_ps
+    set_vector kernel, 0
+    movi    a3, 1b
+    movi    a5, \value
+    \sop    a5, a3, 0
+    movi    a3, 1b - 1
+    \lop    a6, a3, 0
+    assert  eq, a5, a6
+.endm
+#endif
+
+test load_ok
+    load_ok_test l16si, .short, 0x00001234, 0x00001234
+    load_ok_test l16si, .short, 0x000089ab, 0xffff89ab
+    load_ok_test l16ui, .short, 0x00001234, 0x00001234
+    load_ok_test l16ui, .short, 0x000089ab, 0x000089ab
+    load_ok_test l32i,  .word,  0x12345678, 0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    load_ok_test l32ai, .word,  0x12345678, 0x12345678
+#endif
+test_end
+
+#undef WILL_TRAP
+#if XCHAL_UNALIGNED_LOAD_HW
+#define WILL_TRAP 0
+#else
+#define WILL_TRAP 1
+#endif
+
+test load_unaligned
+    load_unaligned_test WILL_TRAP, l16si, .short, 0x00001234, 0x00001234
+    load_unaligned_test WILL_TRAP, l16si, .short, 0x000089ab, 0xffff89ab
+    load_unaligned_test WILL_TRAP, l16ui, .short, 0x00001234, 0x00001234
+    load_unaligned_test WILL_TRAP, l16ui, .short, 0x000089ab, 0x000089ab
+    load_unaligned_test WILL_TRAP, l32i,  .word,  0x12345678, 0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    load_unaligned_test 1,         l32ai, .word,  0x12345678, 0x12345678
+#endif
+test_end
+
+test store_ok
+    store_ok_test s16i,  .short, 0x00001234
+    store_ok_test s32i,  .word,  0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    store_ok_test s32ri, .word,  0x12345678
+#endif
+test_end
+
+#undef WILL_TRAP
+#if XCHAL_UNALIGNED_STORE_HW
+#define WILL_TRAP 0
+#else
+#define WILL_TRAP 1
+#endif
+
+test store_unaligned
+    store_unaligned_test WILL_TRAP, s16i,  l16ui, .short, 0x00001234
+    store_unaligned_test WILL_TRAP, s32i,  l32i,  .word,  0x12345678
+#if XCHAL_HAVE_RELEASE_SYNC
+    store_unaligned_test 1,         s32ri, l32i,  .word,  0x12345678
+#endif
+test_end
+
+test_suite_end