@@ -62,11 +62,16 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
}
}
+static u32 get_u16(u16 *ptr)
+{
+ return le16_to_cpu(*ptr);
+}
+
static u32 riscv_instruction_at(void *p)
{
u16 *parcel = p;
- return (u32)parcel[0] | (u32)parcel[1] << 16;
+ return (u32)get_u16(parcel+0) | (u32)get_u16(parcel+1) << 16;
}
static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
@@ -83,6 +88,8 @@ static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm);
/* patch the call place again */
+ call[0] = cpu_to_le32(call[0]);
+ call[1] = cpu_to_le32(call[1]);
patch_text_nosync(ptr, call, sizeof(u32) * 2);
}
@@ -98,6 +105,7 @@ static void riscv_alternative_fix_jal(void *ptr, u32 jal_insn, int patch_offset)
riscv_insn_insert_jtype_imm(&jal_insn, imm);
/* patch the call place again */
+ jal_insn = cpu_to_le32(jal_insn);
patch_text_nosync(ptr, &jal_insn, sizeof(u32));
}
@@ -37,15 +37,16 @@ static bool decode_cfi_insn(struct pt_regs *regs, unsigned long *target,
*/
if (get_kernel_nofault(insn, (void *)regs->epc - 4))
return false;
+ insn = le32_to_cpu(insn);
if (!riscv_insn_is_beq(insn))
return false;
-
*type = (u32)regs_ptr[RV_EXTRACT_RS1_REG(insn)];
if (get_kernel_nofault(insn, (void *)regs->epc) ||
get_kernel_nofault(insn, (void *)regs->epc + GET_INSN_LENGTH(insn)))
return false;
+ insn = le32_to_cpu(insn);
if (riscv_insn_is_jalr(insn))
rs1_num = RV_EXTRACT_RS1_REG(insn);
else if (riscv_insn_is_c_jalr(insn))
@@ -19,7 +19,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
- u32 insn;
+ __le32 insn;
if (type == JUMP_LABEL_JMP) {
long offset = jump_entry_target(entry) - jump_entry_code(entry);
@@ -36,6 +36,7 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
insn = RISCV_INSN_NOP;
}
+ insn = cpu_to_le32(insn);
mutex_lock(&text_mutex);
patch_insn_write(addr, &insn, sizeof(insn));
mutex_unlock(&text_mutex);
@@ -253,6 +253,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
if (get_kernel_nofault(insn, (bug_insn_t *)pc))
return 0;
+ insn = le32_to_cpu(insn);
return GET_INSN_LENGTH(insn);
}
@@ -399,6 +400,7 @@ int is_valid_bugaddr(unsigned long pc)
return 0;
if (get_kernel_nofault(insn, (bug_insn_t *)pc))
return 0;
+ insn = le32_to_cpu(insn);
if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
return (insn == __BUG_INSN_32);
else
@@ -290,6 +290,7 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
* below with the upper 16 bits half.
*/
insn &= GENMASK(15, 0);
+ insn = le16_to_cpu(insn);
if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
*r_insn = insn;
return 0;
@@ -297,12 +298,14 @@ static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
epc += sizeof(u16);
if (__read_insn(regs, tmp, epc, u16))
return -EFAULT;
+ tmp = le16_to_cpu(tmp);
*r_insn = (tmp << 16) | insn;
return 0;
} else {
if (__read_insn(regs, insn, epc, u32))
return -EFAULT;
+ insn = le32_to_cpu(insn);
if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
*r_insn = insn;
return 0;
The privileged ISA spec says that all instructions should be treated as little endian, so if we load them from memory we should do le{16,32}_to_cpu on them and the reverse when storing. This fixes jump_label, bug and related functions for big endian builds. Signed-off-by: Ben Dooks <ben.dooks@codethink.co.uk> --- arch/riscv/kernel/alternative.c | 10 +++++++++- arch/riscv/kernel/cfi.c | 3 ++- arch/riscv/kernel/jump_label.c | 3 ++- arch/riscv/kernel/traps.c | 2 ++ arch/riscv/kernel/traps_misaligned.c | 3 +++ 5 files changed, 18 insertions(+), 3 deletions(-)