[v3,64/70] x86: Introduce helpers/checks for endbr64 instructions

Message ID 20220222152645.8844-16-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Series x86: Support for CET Indirect Branch Tracking

Commit Message

Andrew Cooper Feb. 22, 2022, 3:26 p.m. UTC
... to prevent the optimiser creating unsafe code.  See the code comment for
full details.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>

v3:
 * Introduce ENDBR64_LEN
v2:
 * Fix include to let the header be standalone
 * Add earlyclobber to asm
v1.1:
 * New
---
 xen/arch/x86/include/asm/endbr.h | 55 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
 create mode 100644 xen/arch/x86/include/asm/endbr.h

Patch

diff --git a/xen/arch/x86/include/asm/endbr.h b/xen/arch/x86/include/asm/endbr.h
new file mode 100644
index 000000000000..6090afeb0bd8
--- /dev/null
+++ b/xen/arch/x86/include/asm/endbr.h
@@ -0,0 +1,55 @@ 
+/******************************************************************************
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (c) 2021-2022 Citrix Systems Ltd.
+ */
+#ifndef XEN_ASM_ENDBR_H
+#define XEN_ASM_ENDBR_H
+
+#include <xen/types.h>
+
+#define ENDBR64_LEN 4
+
+/*
+ * In some cases we need to inspect/insert endbr64 instructions.
+ *
+ * The naive way, mem{cmp,cpy}(ptr, "\xf3\x0f\x1e\xfa", 4), optimises unsafely
+ * by placing 0xfa1e0ff3 in an imm32 operand, which marks a legal indirect
+ * branch target as far as the CPU is concerned.
+ *
+ * gen_endbr64() is written deliberately to avoid the problematic operand, and
+ * marked __const__ as it is safe for the optimiser to hoist/merge/etc.
+ */
+static inline uint32_t __attribute_const__ gen_endbr64(void)
+{
+    uint32_t res;
+
+    asm ( "mov $~0xfa1e0ff3, %[res]\n\t"
+          "not %[res]\n\t"
+          : [res] "=&r" (res) );
+
+    return res;
+}
+
+static inline bool is_endbr64(const void *ptr)
+{
+    return *(const uint32_t *)ptr == gen_endbr64();
+}
+
+static inline void place_endbr64(void *ptr)
+{
+    *(uint32_t *)ptr = gen_endbr64();
+}
+
+#endif /* XEN_ASM_ENDBR_H */
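
A standalone illustration (not part of the patch): the sketch below contrasts the naive memcmp() check described in the header comment with the split-immediate construction used by gen_endbr64(). The file and function names (endbr_demo.c, naive_is_endbr64(), safe_gen_endbr64()) are hypothetical; the safe variant simply mirrors the asm from the header. Build with gcc or clang on x86-64, e.g. "gcc -O2 endbr_demo.c".

/* endbr_demo.c - hypothetical demo, not part of this patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * The naive check.  With optimisation enabled, the compiler may fold the
 * 4-byte memcmp() into a single 32-bit compare, emitting 0xfa1e0ff3 as an
 * imm32 operand.  The bytes f3 0f 1e fa then appear contiguously in .text,
 * which the CPU decodes as endbr64 - an unintended legal indirect branch
 * target.
 */
static int naive_is_endbr64(const void *ptr)
{
    return memcmp(ptr, "\xf3\x0f\x1e\xfa", 4) == 0;
}

/*
 * The construction mirroring gen_endbr64() above: load the complement of the
 * opcode and invert it at runtime, so the problematic byte pattern never
 * appears in the emitted instruction stream.
 */
static uint32_t safe_gen_endbr64(void)
{
    uint32_t res;

    asm ( "mov $~0xfa1e0ff3, %[res]\n\t"
          "not %[res]"
          : [res] "=&r" (res) );

    return res;
}

int main(void)
{
    /* A buffer holding a literal endbr64 byte sequence to check against. */
    const unsigned char buf[] = { 0xf3, 0x0f, 0x1e, 0xfa };
    uint32_t val;

    memcpy(&val, buf, sizeof(val));

    printf("naive check: %d\n", naive_is_endbr64(buf));
    printf("safe  check: %d\n", val == safe_gen_endbr64());

    return 0;
}

Disassembling an optimised build (objdump -d) of the naive variant will typically show the 0xfa1e0ff3 immediate inline, whereas the safe variant only ever contains its complement, 0x05e1f00c, plus a not instruction.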