[kvm-unit-tests,v2,03/18] lib: Add WRITE_ONCE and READ_ONCE implementations in compiler.h

Message ID 20191128180418.6938-4-alexandru.elisei@arm.com (mailing list archive)
State New, archived
Series arm/arm64: Various fixes

Commit Message

Alexandru Elisei Nov. 28, 2019, 6:04 p.m. UTC
Add the WRITE_ONCE and READ_ONCE macros which are used to prevent to
prevent the compiler from optimizing a store or a load, respectively, into
something else.
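
For context, here is a minimal sketch of the kind of bug these macros
guard against (the 'flag' variable and the two helpers are hypothetical,
not part of this patch):

	static int flag;	/* set from another CPU or an IRQ handler */

	void wait_for_flag(void)
	{
		/*
		 * Without READ_ONCE() the compiler may hoist the load of
		 * 'flag' out of the loop and spin forever on a stale
		 * register value.
		 */
		while (!READ_ONCE(flag))
			;
	}

	void set_flag(void)
	{
		/* Forces a real store that cannot be deferred or elided. */
		WRITE_ONCE(flag, 1);
	}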

Cc: Drew Jones <drjones@redhat.com>
Cc: Laurent Vivier <lvivier@redhat.com>
Cc: Thomas Huth <thuth@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 lib/linux/compiler.h | 81 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)
 create mode 100644 lib/linux/compiler.h

Comments

Thomas Huth Dec. 9, 2019, 2:21 p.m. UTC | #1
On 28/11/2019 19.04, Alexandru Elisei wrote:
> Add the WRITE_ONCE and READ_ONCE macros which are used to prevent to

Duplicated "prevent to" - please remove one.

> prevent the compiler from optimizing a store or a load, respectively, into
> something else.

Could you please also add a note here in the commit message about the
kernel version that you used as a base? The file seems to have changed
quite a bit over time, so I think it would be good to know the right
base later.

 Thomas
Alexandru Elisei Dec. 16, 2019, 10:15 a.m. UTC | #2
Hi,

On 12/9/19 2:21 PM, Thomas Huth wrote:
> On 28/11/2019 19.04, Alexandru Elisei wrote:
>> Add the WRITE_ONCE and READ_ONCE macros which are used to prevent to
> Duplicated "prevent to" - please remove one.
>
>> prevent the compiler from optimizing a store or a load, respectively, into
>> something else.
> Could you please also add a note here in the commit message about the
> kernel version that you used as a base? The file seems to have changed
> quite a bit over time, so I think it would be good to know the right
> base later.
>
>  Thomas

Will implement both suggestions, thank you for taking a look.

Thanks,
Alex

Patch

diff --git a/lib/linux/compiler.h b/lib/linux/compiler.h
new file mode 100644
index 000000000000..aac84c1d711c
--- /dev/null
+++ b/lib/linux/compiler.h
@@ -0,0 +1,81 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Taken from tools/include/linux/compiler.h, with minor changes. */
+#ifndef __LINUX_COMPILER_H
+#define __LINUX_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+#define barrier()	asm volatile("" : : : "memory")
+
+#define __always_inline	inline __attribute__((always_inline))
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(uint8_t *)res = *(volatile uint8_t *)p; break;
+	case 2: *(uint16_t *)res = *(volatile uint16_t *)p; break;
+	case 4: *(uint32_t *)res = *(volatile uint32_t *)p; break;
+	case 8: *(uint64_t *)res = *(volatile uint64_t *)p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There are at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of the
+ * variable '__u' allocated on the stack.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#define READ_ONCE(x)					\
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__c = { 0 } };			\
+	__read_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile uint8_t *)p = *(uint8_t *)res; break;
+	case 2: *(volatile uint16_t *)p = *(uint16_t *)res; break;
+	case 4: *(volatile uint32_t *)p = *(uint32_t *)res; break;
+	case 8: *(volatile uint64_t *)p = *(uint64_t *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+#define WRITE_ONCE(x, val) \
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (typeof(x)) (val) }; 	\
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !__LINUX_COMPILER_H */
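
For reference, a short usage sketch of the finished header (the names
below are hypothetical, and the include line assumes lib/ is on the
include path, as for the other kvm-unit-tests lib headers):

	#include <linux/compiler.h>
	#include <stdint.h>

	struct pair {
		uint64_t lo, hi;	/* 16 bytes, wider than a machine word */
	};

	static struct pair shared;
	static uint32_t counter;

	void example(void)
	{
		/* Scalars compile to single, un-merged volatile loads/stores. */
		uint32_t c = READ_ONCE(counter);
		WRITE_ONCE(counter, c + 1);

		/*
		 * An aggregate wider than a word takes the
		 * barrier()/__builtin_memcpy() fallback in __read_once_size().
		 */
		struct pair p = READ_ONCE(shared);
		(void)p;
	}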