@@ -132,7 +132,7 @@ void play_dead(void)
dead_idle();
}
-static void noreturn idle_loop(void)
+static void noreturn cf_check idle_loop(void)
{
unsigned int cpu = smp_processor_id();
/*
@@ -1790,7 +1790,7 @@ static void save_segments(struct vcpu *v)
}
}
-void paravirt_ctxt_switch_from(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v)
{
save_segments(v);
@@ -1804,7 +1804,7 @@ void paravirt_ctxt_switch_from(struct vcpu *v)
write_debugreg(7, 0);
}
-void paravirt_ctxt_switch_to(struct vcpu *v)
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v)
{
root_pgentry_t *root_pgt = this_cpu(root_pgt);
@@ -940,7 +940,7 @@ static inline void svm_tsc_ratio_load(struct vcpu *v)
wrmsrl(MSR_AMD64_TSC_RATIO, hvm_tsc_scaling_ratio(v->domain));
}
-static void svm_ctxt_switch_from(struct vcpu *v)
+static void cf_check svm_ctxt_switch_from(struct vcpu *v)
{
int cpu = smp_processor_id();
@@ -965,7 +965,7 @@ static void svm_ctxt_switch_from(struct vcpu *v)
enable_each_ist(idt_tables[cpu]);
}
-static void svm_ctxt_switch_to(struct vcpu *v)
+static void cf_check svm_ctxt_switch_to(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
int cpu = smp_processor_id();
@@ -992,7 +992,7 @@ static void svm_ctxt_switch_to(struct vcpu *v)
wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
}
-static void noreturn svm_do_resume(void)
+static void noreturn cf_check svm_do_resume(void)
{
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
@@ -1865,7 +1865,7 @@ void vmx_vmentry_failure(void)
void noreturn vmx_asm_do_vmentry(void);
-void vmx_do_resume(void)
+void cf_check vmx_do_resume(void)
{
struct vcpu *v = current;
bool_t debug_state;
@@ -63,8 +63,8 @@
static bool_t __initdata opt_force_ept;
boolean_param("force-ept", opt_force_ept);
-static void vmx_ctxt_switch_from(struct vcpu *v);
-static void vmx_ctxt_switch_to(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v);
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v);
static int alloc_vlapic_mapping(void);
static void vmx_install_vlapic_mapping(struct vcpu *v);
@@ -885,7 +885,7 @@ static void cf_check vmx_fpu_leave(struct vcpu *v)
}
}
-static void vmx_ctxt_switch_from(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_from(struct vcpu *v)
{
/*
* Return early if trying to do a context switch without VMX enabled,
@@ -917,7 +917,7 @@ static void vmx_ctxt_switch_from(struct vcpu *v)
vmx_pi_switch_from(v);
}
-static void vmx_ctxt_switch_to(struct vcpu *v)
+static void cf_check vmx_ctxt_switch_to(struct vcpu *v)
{
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
@@ -351,7 +351,7 @@ void pv_domain_destroy(struct domain *d)
FREE_XENHEAP_PAGE(d->arch.pv.gdt_ldt_l1tab);
}
-void noreturn continue_pv_domain(void);
+void noreturn cf_check continue_pv_domain(void);
int pv_domain_initialise(struct domain *d)
{
@@ -625,6 +625,7 @@ ENTRY(dom_crash_sync_extable)
/* No special register assumptions. */
#ifdef CONFIG_PV
ENTRY(continue_pv_domain)
+ ENDBR64
call check_wakeup_from_wait
ret_from_intr:
GET_CURRENT(bx)
@@ -173,7 +173,6 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
#define switch_stack_and_jump(fn, instr, constr) \
({ \
unsigned int tmp; \
- (void)((fn) == (void (*)(void))NULL); \
BUILD_BUG_ON(!ssaj_has_attr_noreturn(fn)); \
__asm__ __volatile__ ( \
SHADOW_STACK_WORK \
@@ -198,6 +197,7 @@ unsigned long get_stack_dump_bottom (unsigned long sp);
/* The constraint may only specify non-call-clobbered registers. */
#define reset_stack_and_jump_ind(fn) \
+ (void)((fn) == (void (*)(void))NULL); \
switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
/*
@@ -93,7 +93,7 @@ typedef enum {
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_intr_assist(void);
-void noreturn vmx_do_resume(void);
+void noreturn cf_check vmx_do_resume(void);
void vmx_vlapic_msr_changed(struct vcpu *v);
struct hvm_emulate_ctxt;
void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
@@ -104,8 +104,8 @@ static inline unsigned long pv_make_cr4(const struct vcpu *v) { return ~0ul; }
#endif /* CONFIG_PV */
-void paravirt_ctxt_switch_from(struct vcpu *v);
-void paravirt_ctxt_switch_to(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_from(struct vcpu *v);
+void cf_check paravirt_ctxt_switch_to(struct vcpu *v);
#endif /* __X86_PV_DOMAIN_H__ */
The function typecheck in switch_stack_and_jump() is incompatible with
control flow typechecking.  It's ok for reset_stack_and_jump_ind(), but for
reset_stack_and_jump(), it would force us to ENDBR64 the targets which are
branched to directly.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>

Honestly, the control flow typechecking is distinctly sub-par, but it's too
late to do anything now.
---
 xen/arch/x86/domain.c             | 6 +++---
 xen/arch/x86/hvm/svm/svm.c        | 6 +++---
 xen/arch/x86/hvm/vmx/vmcs.c       | 2 +-
 xen/arch/x86/hvm/vmx/vmx.c        | 8 ++++----
 xen/arch/x86/pv/domain.c          | 2 +-
 xen/arch/x86/x86_64/entry.S       | 1 +
 xen/include/asm-x86/current.h     | 2 +-
 xen/include/asm-x86/hvm/vmx/vmx.h | 2 +-
 xen/include/asm-x86/pv/domain.h   | 4 ++--
 9 files changed, 17 insertions(+), 16 deletions(-)
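
[Editorial illustration, not part of the patch.]  For readers less familiar
with CET-IBT, the standalone sketch below shows both halves of the change:
why indirect-branch targets gain cf_check (and hence an ENDBR64), and why
the NULL-comparison typecheck had to move out of the shared macro.  The
names typecheck_fnptr() and demo_*() are invented for this sketch; only the
typecheck idiom itself is taken from the patch.

/*
 * Minimal sketch (plain C, not Xen code) of the ideas in this patch.
 */
#include <stdio.h>

/*
 * Stand-in for Xen's cf_check.  In the real tree this expands to
 * __attribute__((cf_check)) when building with GCC's -mmanual-endbr,
 * telling the compiler to emit ENDBR64 at the function's entry so it is
 * a legal CET-IBT indirect-branch target.  Defined empty here so the
 * sketch compiles on any toolchain.
 */
#define cf_check

/*
 * The typecheck idiom being moved: comparing fn against a NULL of a
 * specific prototype generates no code, but makes the compiler reject
 * any fn whose type is not void (*)(void).
 */
#define typecheck_fnptr(fn) ((void)((fn) == (void (*)(void))NULL))

/* An indirect-branch target: needs cf_check (an ENDBR64) under IBT. */
static void cf_check demo_indirect_target(void)
{
    puts("reached via function pointer");
}

/* A direct-branch target: should not be forced to carry ENDBR64. */
static void demo_direct_target(void)
{
    puts("reached via direct branch");
}

int main(void)
{
    void (*fp)(void) = demo_indirect_target;

    typecheck_fnptr(demo_indirect_target);
    fp();                    /* indirect call, lands on ENDBR64 */

    demo_direct_target();    /* direct call, no ENDBR64 needed */

    /*
     * Once cf_check is a real attribute it becomes part of the function
     * type, so keeping typecheck_fnptr() in the shared
     * switch_stack_and_jump() would have required the NULL's type to be
     * cf_check too, forcing ENDBR64 onto the directly-branched targets
     * as well.  Hence the move into reset_stack_and_jump_ind() only.
     */
    return 0;
}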