@@ -3,29 +3,19 @@
/*
* This is the most generic implementation of unaligned accesses
- * and should work almost anywhere.
+ * and should work almost anywhere; we trust that the compiler
+ * knows how to handle unaligned accesses.
*/
#include <asm/byteorder.h>
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/generic.h>
#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#else
@@ -2,35 +2,36 @@
#define _LINUX_UNALIGNED_BE_STRUCT_H
#include <linux/unaligned/packed_struct.h>
+#include <asm/byteorder.h>
static inline u16 get_unaligned_be16(const void *p)
{
- return __get_unaligned_cpu16((const u8 *)p);
+ return be16_to_cpu((__be16 __force)__get_unaligned_cpu16((const u8 *)p));
}
static inline u32 get_unaligned_be32(const void *p)
{
- return __get_unaligned_cpu32((const u8 *)p);
+ return be32_to_cpu((__be32 __force)__get_unaligned_cpu32((const u8 *)p));
}
static inline u64 get_unaligned_be64(const void *p)
{
- return __get_unaligned_cpu64((const u8 *)p);
+ return be64_to_cpu((__be64 __force)__get_unaligned_cpu64((const u8 *)p));
}
static inline void put_unaligned_be16(u16 val, void *p)
{
- __put_unaligned_cpu16(val, p);
+ __put_unaligned_cpu16((u16 __force)cpu_to_be16(val), p);
}
static inline void put_unaligned_be32(u32 val, void *p)
{
- __put_unaligned_cpu32(val, p);
+ __put_unaligned_cpu32((u32 __force)cpu_to_be32(val), p);
}
static inline void put_unaligned_be64(u64 val, void *p)
{
- __put_unaligned_cpu64(val, p);
+ __put_unaligned_cpu64((u64 __force)cpu_to_be64(val), p);
}
#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
@@ -2,35 +2,36 @@
#define _LINUX_UNALIGNED_LE_STRUCT_H
#include <linux/unaligned/packed_struct.h>
+#include <asm/byteorder.h>
static inline u16 get_unaligned_le16(const void *p)
{
- return __get_unaligned_cpu16((const u8 *)p);
+ return le16_to_cpu((__le16 __force)__get_unaligned_cpu16((const u8 *)p));
}
static inline u32 get_unaligned_le32(const void *p)
{
- return __get_unaligned_cpu32((const u8 *)p);
+ return le32_to_cpu((__le32 __force)__get_unaligned_cpu32((const u8 *)p));
}
static inline u64 get_unaligned_le64(const void *p)
{
- return __get_unaligned_cpu64((const u8 *)p);
+ return le64_to_cpu((__le64 __force)__get_unaligned_cpu64((const u8 *)p));
}
static inline void put_unaligned_le16(u16 val, void *p)
{
- __put_unaligned_cpu16(val, p);
+ __put_unaligned_cpu16((u16 __force)cpu_to_le16(val), p);
}
static inline void put_unaligned_le32(u32 val, void *p)
{
- __put_unaligned_cpu32(val, p);
+ __put_unaligned_cpu32((u32 __force)cpu_to_le32(val), p);
}
static inline void put_unaligned_le64(u64 val, void *p)
{
- __put_unaligned_cpu64(val, p);
+ __put_unaligned_cpu64((u64 __force)cpu_to_le64(val), p);
}
#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */