@@ -63,7 +63,7 @@ static u32 get_alt_insn(const struct alt
if ( insn_is_branch_imm(insn) )
{
- s32 offset = insn_get_branch_offset(insn);
+ int32_t offset = insn_get_branch_offset(insn);
unsigned long target;
target = (unsigned long)altinsnptr + offset;
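
To make the retargeting step concrete: once the absolute target of the relocated branch is known, the instruction can be re-encoded relative to the address it will actually execute from. A minimal sketch, reusing the insn_set_branch_offset() wrapper that appears later in this patch; the name retarget_branch is hypothetical:

#include <stdint.h>

/* Wrapper shown later in this patch; declared here so the sketch stands alone. */
uint32_t insn_set_branch_offset(uint32_t insn, int32_t offset);

static uint32_t retarget_branch(uint32_t insn, unsigned long target,
                                unsigned long new_site)
{
    /* Displacement measured from where the instruction will now live. */
    int32_t offset = (int32_t)(target - new_site);

    return insn_set_branch_offset(insn, offset);
}
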
@@ -32,7 +32,7 @@ void arch_livepatch_apply(const struct l
if ( func->new_addr )
{
- s32 delta;
+ int32_t delta;
/*
* PC is current address (old_addr) + 8 bytes. The semantics for a
@@ -41,11 +41,11 @@ void arch_livepatch_apply(const struct l
* ARM DDI 0406C.c, see A2.3 (pg 45) and A8.8.18 pg (pg 334,335)
*
*/
- delta = (s32)func->new_addr - (s32)(func->old_addr + 8);
+ delta = (int32_t)func->new_addr - (int32_t)(func->old_addr + 8);
/* The arch_livepatch_symbol_ok should have caught it. */
- ASSERT(delta >= -(s32)ARCH_LIVEPATCH_RANGE ||
- delta < (s32)ARCH_LIVEPATCH_RANGE);
+ ASSERT(delta >= -(int32_t)ARCH_LIVEPATCH_RANGE ||
+ delta < (int32_t)ARCH_LIVEPATCH_RANGE);
/* CPU shifts by two (left) when decoding, so we shift right by two. */
delta = delta >> 2;
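
For illustration, the whole computation above can be read as one small function. This is a sketch, not the patch's code: the 0xea000000 value (unconditional B, condition AL) and the name make_arm32_branch are assumptions for illustration, matching what the comment describes:

#include <stdint.h>

static uint32_t make_arm32_branch(uint32_t old_addr, uint32_t new_addr)
{
    /* On ARM32 the PC reads as the instruction's own address + 8. */
    int32_t delta = (int32_t)new_addr - (int32_t)(old_addr + 8);

    /* The CPU shifts left by two when decoding, so shift right by two. */
    delta >>= 2;

    /* B <imm24>: condition AL (0xe), branch opcode (0xa), 24-bit imm. */
    return 0xea000000 | ((uint32_t)delta & 0x00ffffff);
}
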
@@ -113,9 +113,9 @@ bool arch_livepatch_symbol_deny(const st
return false;
}
-static s32 get_addend(unsigned char type, void *dest)
+static int32_t get_addend(unsigned char type, void *dest)
{
- s32 addend = 0;
+ int32_t addend = 0;
switch ( type ) {
case R_ARM_NONE:
@@ -149,7 +149,8 @@ static s32 get_addend(unsigned char type
return addend;
}
-static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
+static int perform_rel(unsigned char type, void *dest, uint32_t val,
+ int32_t addend)
{
switch ( type ) {
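
A note on why get_addend() exists at all: arm32 uses ELF REL relocations, where the addend is not carried in the relocation entry (as with RELA) but in the bytes being relocated, so it must be read out of *dest before the final value is written back. A rough sketch of the pattern, with a hypothetical name:

#include <stdint.h>
#include <string.h>

static int32_t rel_read_addend32(const void *dest)
{
    int32_t addend;

    /* For a 32-bit data relocation, the word being relocated is the addend. */
    memcpy(&addend, dest, sizeof(addend));
    return addend;
}
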
@@ -203,8 +204,8 @@ static int perform_rel(unsigned char typ
* arch_livepatch_verify_distance can't account of addend so we have
* to do the check here as well.
*/
- if ( (s32)val < -(s32)ARCH_LIVEPATCH_RANGE ||
- (s32)val >= (s32)ARCH_LIVEPATCH_RANGE )
+ if ( (int32_t)val < -(int32_t)ARCH_LIVEPATCH_RANGE ||
+ (int32_t)val >= (int32_t)ARCH_LIVEPATCH_RANGE )
return -EOVERFLOW;
/* CPU always shifts insn by two, so complement it. */
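
The re-check matters because the addend can push an otherwise reachable symbol out of the branch's reach. A sketch of the idea; ARCH_LIVEPATCH_RANGE is assumed here to be the +/-32MB reach of an ARM32 branch, which is a guess for illustration only:

#include <stdint.h>
#include <errno.h>

#define ARCH_LIVEPATCH_RANGE (1U << 25) /* assumed: +/-32MB */

static int check_branch_reach(uint32_t val)
{
    /* val already includes the addend, unlike the earlier distance check. */
    if ( (int32_t)val < -(int32_t)ARCH_LIVEPATCH_RANGE ||
         (int32_t)val >= (int32_t)ARCH_LIVEPATCH_RANGE )
        return -EOVERFLOW;

    return 0;
}
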
@@ -234,7 +235,7 @@ int arch_livepatch_perform(struct livepa
uint32_t val;
void *dest;
unsigned char type;
- s32 addend;
+ int32_t addend;
if ( use_rela )
{
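
The use_rela flag captures the REL/RELA distinction noted above: RELA entries carry the addend explicitly, REL entries leave it implicit in the relocated field. Roughly, under the standard ELF32 entry layout and with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t r_offset, r_info; int32_t r_addend; } Elf32_Rela;

/* get_addend() as introduced above; declared here so the sketch stands alone. */
int32_t get_addend(unsigned char type, void *dest);

static int32_t pick_addend(bool use_rela, const Elf32_Rela *rela,
                           unsigned char type, void *dest)
{
    /* RELA: addend travels in the entry; REL: it sits in *dest. */
    return use_rela ? rela->r_addend : get_addend(type, dest);
}
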
@@ -223,9 +223,9 @@ u32 __kprobes aarch64_insn_gen_nop(void)
* signed value (so it can be used when computing a new branch
* target).
*/
-s32 aarch64_get_branch_offset(u32 insn)
+int32_t aarch64_get_branch_offset(uint32_t insn)
{
- s32 imm;
+ int32_t imm;
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
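
Concretely, for B/BL the immediate occupies bits [25:0] and encodes a word offset, so decoding amounts to masking, sign-extending from bit 25, and scaling by 4. A portable sketch of that case (sign_extend32 and decode_b_offset are illustrative names, not the decoder's actual internals):

#include <stdint.h>

static int32_t sign_extend32(uint32_t value, unsigned int bits)
{
    uint32_t m = 1U << (bits - 1);

    value &= (1U << bits) - 1;
    return (int32_t)((value ^ m) - m);
}

static int32_t decode_b_offset(uint32_t insn)
{
    /* imm26 counts 4-byte words; the result is a byte offset. */
    return sign_extend32(insn & 0x03ffffff, 26) * 4;
}
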
@@ -251,7 +251,7 @@ s32 aarch64_get_branch_offset(u32 insn)
* Encode the displacement of a branch in the imm field and return the
* updated instruction.
*/
-u32 aarch64_set_branch_offset(u32 insn, s32 offset)
+uint32_t aarch64_set_branch_offset(uint32_t insn, int32_t offset)
{
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
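
The encode direction is the mirror image: scale the byte offset down by 4 and mask it into the low 26 bits. A sketch of what the AARCH64_INSN_IMM_26 case boils down to for B/BL (encode_b_offset is a hypothetical name):

#include <stdint.h>

static uint32_t encode_b_offset(uint32_t insn, int32_t offset)
{
    uint32_t imm26 = ((uint32_t)offset >> 2) & 0x03ffffff;

    /* Clear the old immediate field and insert the new one. */
    return (insn & ~0x03ffffffU) | imm26;
}
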
@@ -130,7 +130,7 @@ static int reloc_data(enum aarch64_reloc
break;
case 32:
- *(s32 *)place = sval;
+ *(int32_t *)place = sval;
if ( sval < INT32_MIN || sval > UINT32_MAX )
return -EOVERFLOW;
break;
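
Note the deliberately asymmetric bounds: a 64-bit value is accepted if it fits either as int32_t or as uint32_t, the usual rule for 32-bit data relocations. Equivalently (fits_32bit is an illustrative name):

#include <stdbool.h>
#include <stdint.h>

static bool fits_32bit(int64_t sval)
{
    /* Representable as a signed or an unsigned 32-bit quantity. */
    return sval >= INT32_MIN && sval <= (int64_t)UINT32_MAX;
}
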
@@ -12,11 +12,11 @@
#include <xen/stringify.h>
struct alt_instr {
- s32 orig_offset; /* offset to original instruction */
- s32 repl_offset; /* offset to replacement instruction */
- u16 cpufeature; /* cpufeature bit set for replacement */
- u8 orig_len; /* size of original instruction(s) */
- u8 repl_len; /* size of new instruction(s), <= orig_len */
+ int32_t orig_offset; /* offset to original instruction */
+ int32_t repl_offset; /* offset to replacement instruction */
+ uint16_t cpufeature; /* cpufeature bit set for replacement */
+ uint8_t orig_len; /* size of original instruction(s) */
+ uint8_t repl_len; /* size of new instruction(s), <= orig_len */
};
/* Xen: helpers used by common code. */
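
The offsets are self-relative rather than absolute pointers, so the table needs no relocation at load time; consumers recover the addresses by adding each offset to the address of its own field. A sketch assuming the struct above (the helper names are illustrative; pointer arithmetic on void * follows the GNU C convention this code base relies on):

static inline void *alt_orig_ptr(const struct alt_instr *a)
{
    return (void *)&a->orig_offset + a->orig_offset;
}

static inline void *alt_repl_ptr(const struct alt_instr *a)
{
    return (void *)&a->repl_offset + a->repl_offset;
}
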
@@ -75,8 +75,8 @@ u64 aarch64_insn_decode_immediate(enum a
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm);
-s32 aarch64_get_branch_offset(u32 insn);
-u32 aarch64_set_branch_offset(u32 insn, s32 offset);
+int32_t aarch64_get_branch_offset(uint32_t insn);
+uint32_t aarch64_set_branch_offset(uint32_t insn, int32_t offset);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
enum aarch64_insn_branch_type type);
@@ -89,12 +89,12 @@ static inline bool insn_is_branch_imm(u3
return aarch64_insn_is_branch_imm(insn);
}
-static inline s32 insn_get_branch_offset(u32 insn)
+static inline int32_t insn_get_branch_offset(uint32_t insn)
{
return aarch64_get_branch_offset(insn);
}
-static inline u32 insn_set_branch_offset(u32 insn, s32 offset)
+static inline uint32_t insn_set_branch_offset(uint32_t insn, int32_t offset)
{
return aarch64_set_branch_offset(insn, offset);
}
@@ -101,12 +101,12 @@ static void __init efi_arch_relocate_ima
}
}
-extern const s32 __trampoline_rel_start[], __trampoline_rel_stop[];
-extern const s32 __trampoline_seg_start[], __trampoline_seg_stop[];
+extern const int32_t __trampoline_rel_start[], __trampoline_rel_stop[];
+extern const int32_t __trampoline_seg_start[], __trampoline_seg_stop[];
static void __init relocate_trampoline(unsigned long phys)
{
- const s32 *trampoline_ptr;
+ const int32_t *trampoline_ptr;
trampoline_phys = phys;
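
Each array element is itself a self-relative pointer: the stored int32_t, added to its own address, locates a field inside the trampoline that must be adjusted by the physical load address. One plausible shape of the fix-up loop (a sketch; the hunk does not show the actual loop body):

#include <stdint.h>

extern const int32_t __trampoline_rel_start[], __trampoline_rel_stop[];

static void apply_trampoline_rel(unsigned long phys)
{
    const int32_t *p;

    for ( p = __trampoline_rel_start; p < __trampoline_rel_stop; ++p )
        /* Entry value + entry address = location of the field to adjust. */
        *(uint32_t *)(*p + (long)p) += phys;
}
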
@@ -406,7 +406,7 @@ copy_from_unsafe(void *to, const void __
struct exception_table_entry
{
- s32 addr, cont;
+ int32_t addr, cont;
};
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
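
Storing 32-bit self-relative values instead of 64-bit absolute addresses halves the table size and keeps it position-independent. The absolute addresses come back the same way as for alt_instr (ex_addr and ex_cont are illustrative names):

static inline unsigned long ex_addr(const struct exception_table_entry *x)
{
    return (unsigned long)&x->addr + x->addr;
}

static inline unsigned long ex_cont(const struct exception_table_entry *x)
{
    return (unsigned long)&x->cont + x->cont;
}
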
@@ -1859,8 +1859,8 @@ static void cf_check local_time_calibrat
* This allows us to binary shift a 32-bit tsc_elapsed such that:
* stime_elapsed < tsc_elapsed <= 2*stime_elapsed
*/
- while ( ((u32)stime_elapsed64 != stime_elapsed64) ||
- ((s32)stime_elapsed64 < 0) )
+ while ( ((uint32_t)stime_elapsed64 != stime_elapsed64) ||
+ ((int32_t)stime_elapsed64 < 0) )
{
stime_elapsed64 >>= 1;
tsc_elapsed64 >>= 1;
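
The loop condition reads as two casts but is just a range test: it keeps shifting until the 64-bit value fits the positive range of a 32-bit signed integer, i.e. is below 2^31. Equivalently:

#include <stdbool.h>
#include <stdint.h>

static inline bool fits_positive_s32(uint64_t v)
{
    /* (uint32_t)v != v rejects v >= 2^32; (int32_t)v < 0 rejects bit 31. */
    return v < (1ULL << 31);
}
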
@@ -459,8 +459,8 @@ static inline bool bogus(u32 prod, u32 c
static inline u32 calc_unconsumed_bytes(const struct t_buf *buf)
{
- u32 prod = buf->prod, cons = buf->cons;
- s32 x;
+ uint32_t prod = buf->prod, cons = buf->cons;
+ int32_t x;
barrier(); /* must read buf->prod and buf->cons only once */
if ( bogus(prod, cons) )
@@ -478,8 +478,8 @@ static inline u32 calc_unconsumed_bytes(
static inline u32 calc_bytes_to_wrap(const struct t_buf *buf)
{
- u32 prod = buf->prod, cons = buf->cons;
- s32 x;
+ uint32_t prod = buf->prod, cons = buf->cons;
+ int32_t x;
barrier(); /* must read buf->prod and buf->cons only once */
if ( bogus(prod, cons) )
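
In both helpers the signed x absorbs wraparound: prod and cons advance modulo twice the buffer size, so a plain subtraction can come out negative and is corrected by adding 2*size. A sketch of the idiom (data_size stands in for the real buffer-size variable, which these hunks do not show):

#include <stdint.h>

static uint32_t unconsumed(uint32_t prod, uint32_t cons, uint32_t data_size)
{
    int32_t x = prod - cons;

    if ( x < 0 )
        x += 2 * data_size;  /* indices run modulo 2 * data_size */

    return x;
}
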
@@ -186,8 +186,8 @@ typedef int INT32;
/*! [End] no source code translation !*/
-typedef u32 acpi_native_uint;
-typedef s32 acpi_native_int;
+typedef uint32_t acpi_native_uint;
+typedef int32_t acpi_native_int;
typedef u32 acpi_io_address;
typedef u32 acpi_physical_address;
@@ -14,7 +14,7 @@
typedef int8_t s8, __s8;
typedef uint8_t __u8;
typedef int16_t s16, __s16;
-typedef int32_t __s32;
+typedef int32_t s32, __s32;
typedef int64_t __s64;
typedef paddr_t phys_addr_t;
@@ -7,7 +7,6 @@
/* Linux inherited types which are being phased out */
typedef uint8_t u8;
typedef uint16_t u16, __u16;
-typedef int32_t s32;
typedef uint32_t u32, __u32;
typedef int64_t s64;
typedef uint64_t u64, __u64;
... and move the type itself to linux-compat.h. While doing so, switch a
few adjacent types as well, for (a little bit of) consistency.

Signed-off-by: Jan Beulich <jbeulich@suse.com>