[v2,58/70] x86/stack: CFI hardening

Message ID 20220214125127.17985-59-andrew.cooper3@citrix.com (mailing list archive)
State: New, archived
Series: x86: Support for CET Indirect Branch Tracking

Commit Message

Andrew Cooper Feb. 14, 2022, 12:51 p.m. UTC
Control Flow Integrity schemes use toolchain and optionally hardware support
to help protect against call/jump/return oriented programming attacks.

Use cf_check to annotate function pointer targets for the toolchain.
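
(For illustration only: a minimal sketch of how such an annotation is commonly
wired up, assuming GCC's -fcf-protection=branch together with -mmanual-endbr
and its cf_check function attribute.  The guard symbol, placement and names
below are assumptions for the sketch, not taken from this series.)

    /* Sketch only -- the real definition lives in Xen's compiler headers. */
    #ifdef CONFIG_XEN_IBT           /* hypothetical guard for this sketch */
    /*
     * Under -mmanual-endbr the compiler emits ENDBR64 only for functions
     * carrying this attribute, i.e. legitimate indirect branch targets.
     */
    # define cf_check __attribute__((cf_check))
    #else
    # define cf_check
    #endif

    /* A function reached via a function pointer gets the annotation... */
    static void cf_check example_indirect_target(void);

    /* ...while one only ever called or jumped to directly does not. */
    static void example_direct_only(void);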

The function typecheck in switch_stack_and_jump() is incompatible with control
flow typechecking.  It is fine to retain for reset_stack_and_jump_ind(), but
for reset_stack_and_jump() it would force us to place ENDBR64 on targets which
are only ever branched to directly.
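
(To illustrate, an abridged sketch of the two macros as they end up after this
change, with the stack-switching asm elided; the direct-jump variant is only
described in a comment since it is not part of this hunk.)

    /* Abridged sketch: asm body elided, see the current.h hunk below. */
    #define switch_stack_and_jump(fn, instr, constr)                        \
        ({                                                                  \
            BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn));                      \
            /* ... asm: reset the stack and jump to fn ... */               \
        })

    /* reset_stack_and_jump(fn) expands to a direct jmp, so its targets     */
    /* need neither the typecheck nor ENDBR64.                              */

    /* Indirectly-reached targets keep the function pointer typecheck.      */
    #define reset_stack_and_jump_ind(fn)                                    \
        ({                                                                  \
            (void)((fn) == (void (*)(void))NULL);                           \
            switch_stack_and_jump(fn, "INDIRECT_JMP %", "b");               \
        })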

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---

v2:
 * Extend reset_stack_and_jump_ind() with ({ })
---
 xen/arch/x86/domain.c                  | 6 +++---
 xen/arch/x86/hvm/svm/svm.c             | 6 +++---
 xen/arch/x86/hvm/vmx/vmcs.c            | 2 +-
 xen/arch/x86/hvm/vmx/vmx.c             | 8 ++++----
 xen/arch/x86/include/asm/current.h     | 6 ++++--
 xen/arch/x86/include/asm/hvm/vmx/vmx.h | 2 +-
 xen/arch/x86/include/asm/pv/domain.h   | 4 ++--
 xen/arch/x86/pv/domain.c               | 2 +-
 xen/arch/x86/x86_64/entry.S            | 1 +
 9 files changed, 20 insertions(+), 17 deletions(-)

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index ae7c88b51af1..afccc1525f8b 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -132,7 +132,7 @@  void play_dead(void)
         dead_idle();
 }
 
-static void noreturn idle_loop(void)
+static void noreturn cf_check idle_loop(void)
 {
     unsigned int cpu = smp_processor_id();
     /*
@@ -1790,7 +1790,7 @@  static void save_segments(struct vcpu *v)
     }
 }
 
-void paravirt_ctxt_switch_from(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v)
 {
     save_segments(v);
 
@@ -1804,7 +1804,7 @@  void paravirt_ctxt_switch_from(struct vcpu *v)
         write_debugreg(7, 0);
 }
 
-void paravirt_ctxt_switch_to(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v)
 {
     root_pgentry_t *root_pgt = this_cpu(root_pgt);
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index dedb2848e6a1..63535a74b504 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -944,7 +944,7 @@  static inline void svm_tsc_ratio_load(struct vcpu *v)
         wrmsrl(MSR_AMD64_TSC_RATIO, hvm_tsc_scaling_ratio(v->domain));
 }
 
-static void svm_ctxt_switch_from(struct vcpu *v)
+static void cf_check svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
 
@@ -969,7 +969,7 @@  static void svm_ctxt_switch_from(struct vcpu *v)
     enable_each_ist(idt_tables[cpu]);
 }
 
-static void svm_ctxt_switch_to(struct vcpu *v)
+static void cf_check svm_ctxt_switch_to(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     int cpu = smp_processor_id();
@@ -996,7 +996,7 @@  static void svm_ctxt_switch_to(struct vcpu *v)
         wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
-static void noreturn svm_do_resume(void)
+static void noreturn cf_check svm_do_resume(void)
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 60b506ac3f40..e1e1fa14e65e 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1865,7 +1865,7 @@  void vmx_vmentry_failure(void)
 
 void noreturn vmx_asm_do_vmentry(void);
 
-void vmx_do_resume(void)
+void cf_check vmx_do_resume(void)
 {
     struct vcpu *v = current;
     bool_t debug_state;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 2c4804f9b884..41db538a9e3d 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -63,8 +63,8 @@ 
 static bool_t __initdata opt_force_ept;
 boolean_param("force-ept", opt_force_ept);
 
-static void vmx_ctxt_switch_from(struct vcpu *v);
-static void vmx_ctxt_switch_to(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v);
 
 static int alloc_vlapic_mapping(void);
 static void vmx_install_vlapic_mapping(struct vcpu *v);
@@ -907,7 +907,7 @@  static void cf_check vmx_fpu_leave(struct vcpu *v)
     }
 }
 
-static void vmx_ctxt_switch_from(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v)
 {
     /*
      * Return early if trying to do a context switch without VMX enabled,
@@ -939,7 +939,7 @@  static void vmx_ctxt_switch_from(struct vcpu *v)
         vmx_pi_switch_from(v);
 }
 
-static void vmx_ctxt_switch_to(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v)
 {
     vmx_restore_guest_msrs(v);
     vmx_restore_dr(v);
diff --git a/xen/arch/x86/include/asm/current.h b/xen/arch/x86/include/asm/current.h
index dc0edd9ed07d..da5e152a10cc 100644
--- a/xen/arch/x86/include/asm/current.h
+++ b/xen/arch/x86/include/asm/current.h
@@ -173,7 +173,6 @@  unsigned long get_stack_dump_bottom (unsigned long sp);
 #define switch_stack_and_jump(fn, instr, constr)                        \
     ({                                                                  \
         unsigned int tmp;                                               \
-        (void)((fn) == (void (*)(void))NULL);                           \
         BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn));                      \
         __asm__ __volatile__ (                                          \
             SHADOW_STACK_WORK                                           \
@@ -198,7 +197,10 @@  unsigned long get_stack_dump_bottom (unsigned long sp);
 
 /* The constraint may only specify non-call-clobbered registers. */
 #define reset_stack_and_jump_ind(fn)                                    \
-    switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
+    ({                                                                  \
+        (void)((fn) == (void (*)(void))NULL);                           \
+        switch_stack_and_jump(fn, "INDIRECT_JMP %", "b");               \
+    })
 
 /*
  * Which VCPU's state is currently running on each CPU?
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 5284fe931f62..c2ebdd6864a5 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -93,7 +93,7 @@  typedef enum {
 
 void vmx_asm_vmexit_handler(struct cpu_user_regs);
 void vmx_intr_assist(void);
-void noreturn vmx_do_resume(void);
+void noreturn cf_check vmx_do_resume(void);
 void vmx_vlapic_msr_changed(struct vcpu *v);
 struct hvm_emulate_ctxt;
 void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
diff --git a/xen/arch/x86/include/asm/pv/domain.h b/xen/arch/x86/include/asm/pv/domain.h
index 6b16da9d187b..924508bbb4f0 100644
--- a/xen/arch/x86/include/asm/pv/domain.h
+++ b/xen/arch/x86/include/asm/pv/domain.h
@@ -118,8 +118,8 @@  static inline void pv_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
 
 #endif	/* CONFIG_PV */
 
-void paravirt_ctxt_switch_from(struct vcpu *v);
-void paravirt_ctxt_switch_to(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v);
 
 #endif	/* __X86_PV_DOMAIN_H__ */
 
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
index 55146c15c853..f94f28c8e271 100644
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -351,7 +351,7 @@  void pv_domain_destroy(struct domain *d)
     FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
 }
 
-void noreturn continue_pv_domain(void);
+void noreturn cf_check continue_pv_domain(void);
 
 int pv_domain_initialise(struct domain *d)
 {
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 3eaf0e67b2b9..8494b97a54a2 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -625,6 +625,7 @@  ENTRY(dom_crash_sync_extable)
 /* No special register assumptions. */
 #ifdef CONFIG_PV
 ENTRY(continue_pv_domain)
+        ENDBR64
         call  check_wakeup_from_wait
 ret_from_intr:
         GET_CURRENT(bx)