@@ -38,6 +38,7 @@
#define RISCV_SZPTR "8"
#define RISCV_LGPTR "3"
#endif
+#define __PTR_SEL(a, b) __ASM_STR(a)
#elif __SIZEOF_POINTER__ == 4
#ifdef __ASSEMBLY__
#define RISCV_PTR .word
@@ -48,10 +49,14 @@
#define RISCV_SZPTR "4"
#define RISCV_LGPTR "2"
#endif
+#define __PTR_SEL(a, b) __ASM_STR(b)
#else
#error "Unexpected __SIZEOF_POINTER__"
#endif
+#define PTR_L __PTR_SEL(ld, lw)
+#define PTR_S __PTR_SEL(sd, sw)
+
#if (__SIZEOF_INT__ == 4)
#define RISCV_INT __ASM_STR(.word)
#define RISCV_SZINT __ASM_STR(4)
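
For orientation, a minimal sketch (not part of the patch) of how the new macros resolve. It leans on the __ASM_STR helper already defined at the top of this header, which emits a bare token in .S files and a string literal in C, so PTR_L/PTR_S work in both contexts. Note the change is about intent rather than behavior: XLEN equals the pointer size in all supported configurations, so PTR_L/PTR_S expand identically to REG_L/REG_S today, but they document that a pointer, not a raw register-width value, is being moved.

/* In C, PTR_L is the string "ld" (rv64) or "lw" (rv32), so it can be
 * pasted into inline asm; load_ptr() is a hypothetical example, not
 * kernel code:
 */
static inline void *load_ptr(void *const *slot)
{
	void *val;

	asm volatile (PTR_L " %0, 0(%1)" : "=r" (val) : "r" (slot));
	return val;
}

In a .S file the same macro expands to the bare mnemonic, so "PTR_L t0, 0(t0)" assembles as "ld t0, 0(t0)" on rv64 and "lw t0, 0(t0)" on rv32.
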
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -25,19 +25,19 @@ SYM_CODE_START(handle_exception)
_restore_kernel_tpsp:
csrr tp, CSR_SCRATCH
- REG_S sp, TASK_TI_KERNEL_SP(tp)
+ PTR_S sp, TASK_TI_KERNEL_SP(tp)
#ifdef CONFIG_VMAP_STACK
addi sp, sp, -(PT_SIZE_ON_STACK)
srli sp, sp, THREAD_SHIFT
andi sp, sp, 0x1
bnez sp, handle_kernel_stack_overflow
- REG_L sp, TASK_TI_KERNEL_SP(tp)
+ PTR_L sp, TASK_TI_KERNEL_SP(tp)
#endif
_save_context:
- REG_S sp, TASK_TI_USER_SP(tp)
- REG_L sp, TASK_TI_KERNEL_SP(tp)
+ PTR_S sp, TASK_TI_USER_SP(tp)
+ PTR_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
REG_S x1, PT_RA(sp)
REG_S x3, PT_GP(sp)
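
The split in this hunk is intentional: the thread_info slots hold stack pointers, so their accesses become PTR_S/PTR_L, while the pt_regs spills just below keep REG_S because the GPRs carry arbitrary XLEN-wide values, not necessarily pointers. Rough shape of the fields behind the TASK_TI_* offsets (a sketch; the real struct thread_info is authoritative):

struct thread_info {
	/* ... */
	long kernel_sp;		/* TASK_TI_KERNEL_SP: kernel-mode sp */
	long user_sp;		/* TASK_TI_USER_SP: user-mode sp */
	/* ... */
};
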
@@ -53,7 +53,7 @@ _save_context:
*/
li t0, SR_SUM | SR_FS_VS
- REG_L s0, TASK_TI_USER_SP(tp)
+ PTR_L s0, TASK_TI_USER_SP(tp)
csrrc s1, CSR_STATUS, t0
csrr s2, CSR_EPC
csrr s3, CSR_TVAL
@@ -96,7 +96,7 @@ _save_context:
add t0, t1, t0
/* Check if exception code lies within bounds */
bgeu t0, t2, 1f
- REG_L t0, 0(t0)
+ PTR_L t0, 0(t0)
jr t0
1:
tail do_trap_unknown
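
What the lines above implement, as a C-level sketch: index a table of pointer-sized handler entries by exception cause, bounds-checked, falling back to do_trap_unknown. The table symbol names are assumed from their use in entry.S; the C function itself is illustrative only:

struct pt_regs;
typedef void (*trap_handler_t)(struct pt_regs *);
extern trap_handler_t excp_vect_table[], excp_vect_table_end[];
extern void do_trap_unknown(struct pt_regs *);

static void dispatch_exception(unsigned long cause, struct pt_regs *regs)
{
	/* bgeu t0, t2, 1f: a cause beyond the table falls through */
	if (cause < (unsigned long)(excp_vect_table_end - excp_vect_table))
		excp_vect_table[cause](regs);	/* PTR_L t0, 0(t0); jr t0 */
	else
		do_trap_unknown(regs);		/* tail do_trap_unknown */
}

Each table entry is one function pointer, which is exactly why the load becomes PTR_L rather than the XLEN-sized REG_L.
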
@@ -121,7 +121,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
/* Save unwound kernel stack pointer in thread_info */
addi s0, sp, PT_SIZE_ON_STACK
- REG_S s0, TASK_TI_KERNEL_SP(tp)
+ PTR_S s0, TASK_TI_KERNEL_SP(tp)
/*
* Save TP into the scratch register, so we can find the kernel data
@@ -239,7 +239,7 @@ restore_caller_reg:
REG_S x5, PT_T0(sp)
save_from_x6_to_x31
- REG_L s0, TASK_TI_KERNEL_SP(tp)
+ PTR_L s0, TASK_TI_KERNEL_SP(tp)
csrr s1, CSR_STATUS
csrr s2, CSR_EPC
csrr s3, CSR_TVAL
@@ -283,8 +283,8 @@ SYM_FUNC_START(__switch_to)
li a4, TASK_THREAD_RA
add a3, a0, a4
add a4, a1, a4
- REG_S ra, TASK_THREAD_RA_RA(a3)
- REG_S sp, TASK_THREAD_SP_RA(a3)
+ PTR_S ra, TASK_THREAD_RA_RA(a3)
+ PTR_S sp, TASK_THREAD_SP_RA(a3)
REG_S s0, TASK_THREAD_S0_RA(a3)
REG_S s1, TASK_THREAD_S1_RA(a3)
REG_S s2, TASK_THREAD_S2_RA(a3)
@@ -298,8 +298,8 @@ SYM_FUNC_START(__switch_to)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
/* Restore context from next->thread */
- REG_L ra, TASK_THREAD_RA_RA(a4)
- REG_L sp, TASK_THREAD_SP_RA(a4)
+ PTR_L ra, TASK_THREAD_RA_RA(a4)
+ PTR_L sp, TASK_THREAD_SP_RA(a4)
REG_L s0, TASK_THREAD_S0_RA(a4)
REG_L s1, TASK_THREAD_S1_RA(a4)
REG_L s2, TASK_THREAD_S2_RA(a4)
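
In __switch_to only ra and sp are true pointers in prev->thread/next->thread, so they move to PTR_S/PTR_L; s0-s11 stay on REG_S/REG_L because callee-saved registers can hold any XLEN-wide value. The fields behind the TASK_THREAD_* offsets look roughly like this (a sketch, not the authoritative definition):

struct thread_struct {
	unsigned long ra;	/* return address: PTR_S / PTR_L */
	unsigned long sp;	/* kernel stack pointer: PTR_S / PTR_L */
	unsigned long s[12];	/* s0..s11 callee-saved: REG_S / REG_L */
};
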
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -42,7 +42,7 @@ ENTRY(_start)
/* Image load offset (0MB) from start of RAM for M-mode */
.dword 0
#else
-#if __riscv_xlen == 64
+#ifdef CONFIG_64BIT
/* Image load offset (2MB) from start of RAM */
.dword 0x200000
#else
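
CONFIG_64BIT is the kernel's own word-size switch, while __riscv_xlen is a compiler-provided macro; testing the Kconfig symbol keeps head.S consistent with how the rest of the tree selects between the two layouts. The two must agree for a sane build, which a C translation unit could assert like this (illustrative only):

#ifdef CONFIG_64BIT
_Static_assert(__riscv_xlen == 64, "CONFIG_64BIT needs an rv64 toolchain");
#else
_Static_assert(__riscv_xlen == 32, "rv32 kernel needs an rv32 toolchain");
#endif
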
@@ -75,7 +75,7 @@ relocate_enable_mmu:
/* Relocate return address */
la a1, kernel_map
XIP_FIXUP_OFFSET a1
- REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
+ PTR_L a1, KERNEL_MAP_VIRT_ADDR(a1)
la a2, _start
sub a1, a1, a2
add ra, ra, a1
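
The fixup above computes the delta between the kernel's eventual virtual address and where it was actually loaded, then applies it to the return address before the MMU comes on. kernel_map.virt_addr is a pointer-sized field (the asm reaches it via the KERNEL_MAP_VIRT_ADDR offset), hence PTR_L. In C, with the struct layout abbreviated to the one field this sketch needs:

extern char _start[];
extern struct kernel_mapping {
	uintptr_t virt_addr;	/* other fields omitted in this sketch */
} kernel_map;

static uintptr_t relocation_delta(void)
{
	/* sub a1, a1, a2: virtual address minus load address of _start */
	return kernel_map.virt_addr - (uintptr_t)_start;
}
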
@@ -348,8 +348,8 @@ clear_bss_done:
*/
.Lwait_for_cpu_up:
/* FIXME: We should WFI to save some energy here. */
- REG_L sp, (a1)
- REG_L tp, (a2)
+ PTR_L sp, (a1)
+ PTR_L tp, (a2)
beqz sp, .Lwait_for_cpu_up
beqz tp, .Lwait_for_cpu_up
fence
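
A C rendering of the spin loop (a sketch; the slot parameters are hypothetical stand-ins for the locations a1 and a2 point at): the boot CPU publishes the secondary's stack and thread pointers, the secondary polls until both are non-NULL, then orders later accesses with the kernel's smp_mb():

static void wait_for_cpu_up(void *const *sp_slot, void *const *tp_slot)
{
	void *sp, *tp;

	do {
		sp = READ_ONCE(*sp_slot);	/* PTR_L sp, (a1) */
		tp = READ_ONCE(*tp_slot);	/* PTR_L tp, (a2) */
	} while (!sp || !tp);			/* beqz sp/tp: keep waiting */
	smp_mb();				/* fence */
}

Both slots hold pointers, which is why these loads are natural PTR_L conversions.
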