@@ -829,7 +829,8 @@ The implementation must also have a mechanism for:
* Be able to lookup in the Xen hypervisor the symbol names of functions from the ELF payload.
* Be able to patch .rodata, .bss, and .data sections.
* Further safety checks (blacklist of which functions cannot be patched, check
- the stack, make sure the payload is built with same compiler as hypervisor).
+ the stack, make sure the payload is built with the same compiler as the
+ hypervisor, and for now also blacklist the NMI/MCE handlers such as
+ do_nmi - until a safe solution is found).
* NOP out the code sequence if `new_size` is zero.
* Deal with other relocation types: R_X86_64_[8,16,32,32S], R_X86_64_PC[8,16,64] in the payload file (an illustrative sketch follows this list).
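
(As an illustrative aside for the last item above: handling those relocation
types mostly reduces to range-checking the computed value and storing it at
the right width. The helper below is a sketch only - its name, signature, and
choice of error codes are invented here, not part of this series.)

    static int apply_rela_type(uint8_t *dest, uint64_t val, uint32_t type)
    {
        switch ( type )
        {
        case R_X86_64_32:      /* Value must zero-extend to 64 bits. */
            if ( val != (uint32_t)val )
                return -EOVERFLOW;
            *(uint32_t *)dest = val;
            break;

        case R_X86_64_32S:     /* Value must sign-extend to 64 bits. */
            if ( (int64_t)val != (int32_t)val )
                return -EOVERFLOW;
            *(int32_t *)dest = val;
            break;

        case R_X86_64_PC32:    /* PC-relative: subtract the patch location. */
            val -= (uint64_t)dest;
            if ( (int64_t)val != (int32_t)val )
                return -EOVERFLOW;
            *(int32_t *)dest = val;
            break;

        default:
            return -EOPNOTSUPP;
        }

        return 0;
    }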
@@ -6,6 +6,26 @@
#include <xen/xsplice_elf.h>
#include <xen/xsplice.h>
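+
+/*
+ * No-op arch hooks: patching is not supported on this architecture yet
+ * (note that arch_xsplice_verify_elf() below returns -ENOSYS).
+ */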
+void arch_xsplice_patching_enter(void)
+{
+}
+
+void arch_xsplice_patching_leave(void)
+{
+}
+
+void arch_xsplice_apply_jmp(struct xsplice_patch_func *func)
+{
+}
+
+void arch_xsplice_revert_jmp(struct xsplice_patch_func *func)
+{
+}
+
+void arch_xsplice_post_action(void)
+{
+}
+
int arch_xsplice_verify_elf(const struct xsplice_elf *elf, void *data)
{
return -ENOSYS;
@@ -36,6 +36,7 @@
#include <xen/cpu.h>
#include <xen/wait.h>
#include <xen/guest_access.h>
+#include <xen/xsplice.h>
#include <public/sysctl.h>
#include <public/hvm/hvm_vcpu.h>
#include <asm/regs.h>
@@ -121,6 +122,7 @@ static void idle_loop(void)
(*pm_idle)();
do_tasklet();
do_softirq();
+ check_for_xsplice_work(); /* Must be last. */
}
}
@@ -137,6 +139,7 @@ void startup_cpu_idle_loop(void)
static void noreturn continue_idle_domain(struct vcpu *v)
{
+ check_for_xsplice_work();
reset_stack_and_jump(idle_loop);
}
@@ -144,6 +147,7 @@ static void noreturn continue_nonidle_domain(struct vcpu *v)
{
check_wakeup_from_wait();
mark_regs_dirty(guest_cpu_user_regs());
+ check_for_xsplice_work();
reset_stack_and_jump(ret_from_intr);
}
@@ -26,6 +26,7 @@
#include <xen/hypercall.h>
#include <xen/domain_page.h>
#include <xen/xenoprof.h>
+#include <xen/xsplice.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/paging.h>
@@ -1096,6 +1097,7 @@ static void noreturn svm_do_resume(struct vcpu *v)
hvm_do_resume(v);
+ check_for_xsplice_work();
reset_stack_and_jump(svm_asm_do_resume);
}
@@ -25,6 +25,7 @@
#include <xen/kernel.h>
#include <xen/keyhandler.h>
#include <xen/vm_event.h>
+#include <xen/xsplice.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
@@ -1722,6 +1723,7 @@ void vmx_do_resume(struct vcpu *v)
}
hvm_do_resume(v);
+ check_for_xsplice_work();
reset_stack_and_jump(vmx_asm_do_vmentry);
}
@@ -10,6 +10,45 @@
#include <xen/xsplice_elf.h>
#include <xen/xsplice.h>
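+
+/* Size of an x86 near relative jump: opcode 0xe9 plus a 32-bit displacement. */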
+#define PATCH_INSN_SIZE 5
+
+void arch_xsplice_patching_enter(void)
+{
+ /* Disable WP to allow changes to read-only pages. */
+ write_cr0(read_cr0() & ~X86_CR0_WP);
+}
+
+void arch_xsplice_patching_leave(void)
+{
+ /* Reinstate WP. */
+ write_cr0(read_cr0() | X86_CR0_WP);
+}
+
+void arch_xsplice_apply_jmp(struct xsplice_patch_func *func)
+{
+ uint32_t val;
+ uint8_t *old_ptr;
+
+ BUILD_BUG_ON(PATCH_INSN_SIZE > sizeof(func->undo));
+
+ old_ptr = (uint8_t *)func->old_addr;
+ memcpy(func->undo, old_ptr, PATCH_INSN_SIZE);
+
+ *old_ptr++ = 0xe9; /* Relative jump */
+ val = func->new_addr - func->old_addr - PATCH_INSN_SIZE;
+ memcpy(old_ptr, &val, sizeof val);
+}
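+
+/*
+ * E.g. patching old_addr 0xffff82d080123450 to new_addr 0xffff82d080200000
+ * writes e9 ab cb 0d 00: rel32 = new_addr - old_addr - PATCH_INSN_SIZE
+ * = 0xdcbab (stored little-endian), with the five original bytes saved in
+ * 'undo' for arch_xsplice_revert_jmp() below.
+ */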
+
+void arch_xsplice_revert_jmp(struct xsplice_patch_func *func)
+{
+ memcpy((void *)func->old_addr, func->undo, PATCH_INSN_SIZE);
+}
+
+void arch_xsplice_post_action(void)
+{
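+ /*
+ * cpuid is a serializing instruction: it flushes this CPU's pipeline so
+ * that the freshly patched instructions are fetched anew.
+ */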
+ cpuid_eax(0);
+}
+
int arch_xsplice_verify_elf(const struct xsplice_elf *elf, void *data)
{
@@ -3,6 +3,7 @@
*
*/
+#include <xen/cpu.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/lib.h>
@@ -10,17 +11,29 @@
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/smp.h>
+#include <xen/softirq.h>
#include <xen/spinlock.h>
#include <xen/vmap.h>
+#include <xen/wait.h>
#include <xen/xsplice_elf.h>
#include <xen/xsplice.h>
#include <asm/event.h>
+#include <asm/nmi.h>
#include <public/sysctl.h>
+/*
+ * Protects the payload_list operations and also ensures that only one
+ * caller at a time is in schedule_work.
+ */
static DEFINE_SPINLOCK(payload_lock);
static LIST_HEAD(payload_list);
+/*
+ * Patches which have been applied.
+ */
+static LIST_HEAD(applied_list);
+
static unsigned int payload_cnt;
static unsigned int payload_version = 1;
@@ -31,9 +44,31 @@ struct payload {
void *payload_address; /* Virtual address mapped. */
size_t payload_pages; /* Nr of the pages. */
mfn_t *mfn; /* Array of MFNs of the pages. */
+ struct list_head applied_list; /* Linked to 'applied_list'. */
+ struct xsplice_patch_func *funcs; /* The array of functions to patch. */
+ unsigned int nfuncs; /* Nr of functions to patch. */
char name[XEN_XSPLICE_NAME_SIZE + 1];/* Name of it. */
};
+/* Defines an outstanding patching action. */
+struct xsplice_work
+{
+ atomic_t semaphore; /* Used for rendezvous. First to grab it will
+ do the patching. */
+ atomic_t irq_semaphore; /* Used to signal all IRQs disabled. */
+ s_time_t timeout; /* Timeout to do the operation. */
+ struct payload *data; /* The payload on which to act. */
+ volatile bool_t do_work; /* Signals work to do. */
+ volatile bool_t ready; /* Signals all CPUs synchronized. */
+ uint32_t cmd; /* Action request: XSPLICE_ACTION_* */
+};
+
+/* There can be only one outstanding patching action. */
+static struct xsplice_work xsplice_work;
+
+/* Indicate whether the CPU needs to consult xsplice_work structure. */
+static DEFINE_PER_CPU(bool_t, work_to_do);
+
static int verify_name(const xen_xsplice_name_t *name)
{
if ( name->size == 0 || name->size > XEN_XSPLICE_NAME_SIZE )
@@ -225,6 +260,72 @@ static int secure_payload(struct payload *payload, struct xsplice_elf *elf)
return rc;
}
+static int check_special_sections(struct payload *payload,
+ struct xsplice_elf *elf)
+{
+ unsigned int i;
+ static const char *const names[] = { ".xsplice.funcs" };
+
+ for ( i = 0; i < ARRAY_SIZE(names); i++ )
+ {
+ struct xsplice_elf_sec *sec;
+
+ sec = xsplice_elf_sec_by_name(elf, names[i]);
+ if ( !sec )
+ {
+ printk(XENLOG_ERR "%s%s: %s is missing!\n",
+ XSPLICE, elf->name, names[i]);
+ return -EINVAL;
+ }
+ if ( !sec->sec->sh_size )
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int prepare_payload(struct payload *payload,
+ struct xsplice_elf *elf)
+{
+ struct xsplice_elf_sec *sec;
+ unsigned int i;
+ struct xsplice_patch_func *f;
+
+ sec = xsplice_elf_sec_by_name(elf, ".xsplice.funcs");
+ if ( sec )
+ {
+ if ( sec->sec->sh_size % sizeof *payload->funcs )
+ {
+ dprintk(XENLOG_DEBUG, "%s%s: Wrong size of .xsplice.funcs!\n",
+ XSPLICE, elf->name);
+ return -EINVAL;
+ }
+ payload->funcs = (struct xsplice_patch_func *)sec->load_addr;
+ payload->nfuncs = sec->sec->sh_size / (sizeof *payload->funcs);
+ }
+
+ for ( i = 0; i < payload->nfuncs; i++ )
+ {
+ unsigned int j;
+
+ f = &(payload->funcs[i]);
+
+ if ( !f->new_addr || !f->old_addr || !f->old_size || !f->new_size )
+ {
+ dprintk(XENLOG_DEBUG, "%s%s: Address or size fields are zero!\n",
+ XSPLICE, elf->name);
+ return -EINVAL;
+ }
+ for ( j = 0; j < ARRAY_SIZE(f->undo); j++ )
+ if ( f->undo[j] )
+ return -EINVAL;
+
+ for ( j = 0; j < ARRAY_SIZE(f->pad); j++ )
+ if ( f->pad[j] )
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* We MUST be holding the payload_lock spinlock.
*/
@@ -267,8 +368,15 @@ static int load_payload_data(struct payload *payload, void *raw, ssize_t len)
if ( rc )
goto out;
- rc = secure_payload(payload, &elf);
+ rc = check_special_sections(payload, &elf);
+ if ( rc )
+ goto out;
+ rc = prepare_payload(payload, &elf);
+ if ( rc )
+ goto out;
+
+ rc = secure_payload(payload, &elf);
out:
if ( rc )
free_payload_data(payload);
@@ -320,6 +428,7 @@ static int xsplice_upload(xen_sysctl_xsplice_upload_t *upload)
data->state = XSPLICE_STATE_CHECKED;
data->rc = 0;
INIT_LIST_HEAD(&data->list);
+ INIT_LIST_HEAD(&data->applied_list);
spin_lock_recursive(&payload_lock);
list_add_tail(&data->list, &payload_list);
@@ -414,6 +523,315 @@ static int xsplice_list(xen_sysctl_xsplice_list_t *list)
return rc ? : idx;
}
+/*
+ * The following functions get the CPUs into an appropriate state and
+ * apply (or revert) each of the payload's functions. This is needed
+ * for XEN_SYSCTL_XSPLICE_ACTION operation (see xsplice_action).
+ */
+
+static int apply_payload(struct payload *data)
+{
+ unsigned int i;
+
+ dprintk(XENLOG_DEBUG, "%s%s: Applying %u functions.\n", XSPLICE,
+ data->name, data->nfuncs);
+
+ arch_xsplice_patching_enter();
+
+ for ( i = 0; i < data->nfuncs; i++ )
+ arch_xsplice_apply_jmp(&data->funcs[i]);
+
+ arch_xsplice_patching_leave();
+
+ list_add_tail(&data->applied_list, &applied_list);
+
+ return 0;
+}
+
+/*
+ * This function is executed while all other CPUs are parked (possibly in
+ * cpu_idle, so with no meaningful stack) and with IRQs disabled.
+ */
+static int revert_payload(struct payload *data)
+{
+ unsigned int i;
+
+ dprintk(XENLOG_DEBUG, "%s%s: Reverting.\n", XSPLICE, data->name);
+
+ arch_xsplice_patching_enter();
+
+ for ( i = 0; i < data->nfuncs; i++ )
+ arch_xsplice_revert_jmp(&data->funcs[i]);
+
+ arch_xsplice_patching_leave();
+
+ list_del_init(&data->applied_list);
+
+ return 0;
+}
+
+/*
+ * This function is executed while all other CPUs are parked (possibly in
+ * cpu_idle, so with no meaningful stack) and with IRQs disabled. We guard
+ * against NMIs by temporarily installing our NOP NMI handler.
+ */
+static void xsplice_do_action(void)
+{
+ int rc;
+ struct payload *data, *other, *tmp;
+
+ data = xsplice_work.data;
+ /*
+ * Now this function should be the only one running on any stack, so
+ * there is no need to lock the payload or applied lists.
+ */
+ switch ( xsplice_work.cmd )
+ {
+ case XSPLICE_ACTION_APPLY:
+ rc = apply_payload(data);
+ if ( rc == 0 )
+ data->state = XSPLICE_STATE_APPLIED;
+ break;
+ case XSPLICE_ACTION_REVERT:
+ rc = revert_payload(data);
+ if ( rc == 0 )
+ data->state = XSPLICE_STATE_CHECKED;
+ break;
+ case XSPLICE_ACTION_REPLACE:
+ rc = 0;
+ /* N.B: Use 'applied_list' member, not 'list'. */
+ list_for_each_entry_safe_reverse ( other, tmp, &applied_list, applied_list )
+ {
+ other->rc = revert_payload(other);
+ if ( other->rc == 0 )
+ other->state = XSPLICE_STATE_CHECKED;
+ else
+ {
+ rc = -EINVAL;
+ break;
+ }
+ }
+ if ( rc != -EINVAL )
+ {
+ rc = apply_payload(data);
+ if ( rc == 0 )
+ data->state = XSPLICE_STATE_APPLIED;
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ data->rc = rc;
+}
+
+/*
+ * MUST be holding the payload_lock.
+ */
+static int schedule_work(struct payload *data, uint32_t cmd, uint32_t timeout)
+{
+ unsigned int cpu;
+
+ ASSERT(spin_is_locked(&payload_lock));
+
+ /* Fail if an operation is already scheduled. */
+ if ( xsplice_work.do_work )
+ return -EBUSY;
+
+ if ( !get_cpu_maps() )
+ {
+ printk(XENLOG_ERR "%s%s: unable to get cpu_maps lock!\n",
+ XSPLICE, data->name);
+ return -EBUSY;
+ }
+
+ xsplice_work.cmd = cmd;
+ xsplice_work.data = data;
+ xsplice_work.timeout = timeout ?: MILLISECS(30);
+
+ dprintk(XENLOG_DEBUG, "%s%s: timeout is %"PRI_stime"ms\n",
+ XSPLICE, data->name, xsplice_work.timeout / MILLISECS(1));
+
+ /*
+ * Once the patching has been completed, the semaphore value will
+ * be num_online_cpus()-1.
+ */
+ atomic_set(&xsplice_work.semaphore, -1);
+ atomic_set(&xsplice_work.irq_semaphore, -1);
+
+ xsplice_work.ready = 0;
+ smp_wmb();
+ xsplice_work.do_work = 1;
+ smp_wmb();
+ /*
+ * The above smp_wmb() also acts as a compiler barrier: the per-CPU
+ * flags MUST only be set after the global structure has been filled in.
+ */
+ for_each_online_cpu ( cpu )
+ per_cpu(work_to_do, cpu) = 1;
+
+ put_cpu_maps();
+
+ return 0;
+}
+
+/*
+ * Note that because this handler is a NOP, do_nmi itself is not safely
+ * patchable. Also, any 'real' NMI arriving while it is installed is lost.
+ * Ditto for MCE.
+ */
+static int mask_nmi_callback(const struct cpu_user_regs *regs, int cpu)
+{
+ /* TODO: Handle missing NMI/MCE. */
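+ /* A non-zero return consumes the NMI rather than running the default handler. */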
+ return 1;
+}
+
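+/*
+ * IPI handler: raising SCHEDULE_SOFTIRQ sends the CPU through the idle or
+ * return-to-guest paths, where check_for_xsplice_work() gets invoked.
+ */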
+static void reschedule_fn(void *unused)
+{
+ smp_mb(); /* Synchronize with setting do_work */
+ raise_softirq(SCHEDULE_SOFTIRQ);
+}
+
+static int xsplice_do_wait(atomic_t *counter, s_time_t timeout,
+ unsigned int total_cpus, const char *s)
+{
+ int rc = 0;
+
+ while ( atomic_read(counter) != total_cpus && NOW() < timeout )
+ cpu_relax();
+
+ /* Log & abort. */
+ if ( atomic_read(counter) != total_cpus )
+ {
+ printk(XENLOG_ERR "%s%s: %s %u/%u\n", XSPLICE,
+ xsplice_work.data->name, s, atomic_read(counter), total_cpus);
+ rc = -EBUSY;
+ xsplice_work.data->rc = rc;
+ xsplice_work.do_work = 0;
+ smp_wmb();
+ }
+ return rc;
+}
+
+/*
+ * The main function which manages the work of quiescing the system and
+ * patching code: the first CPU to take the semaphore becomes the master,
+ * IPIs the rest, waits for them to rendezvous with IRQs disabled,
+ * performs the patching, and then releases everybody.
+ */
+void check_for_xsplice_work(void)
+{
+ unsigned int cpu = smp_processor_id();
+ nmi_callback_t saved_nmi_callback;
+ s_time_t timeout;
+ unsigned long flags;
+
+ /* Fast path: no work to do. */
+ if ( !per_cpu(work_to_do, cpu) )
+ return;
+
+ /* In case we aborted, other CPUs can skip right away. */
+ if ( !xsplice_work.do_work )
+ {
+ per_cpu(work_to_do, cpu) = 0;
+ return;
+ }
+
+ ASSERT(local_irq_is_enabled());
+
+ /* The semaphore starts at -1, so it counts up to num_online_cpus() - 1. */
+ if ( atomic_inc_and_test(&xsplice_work.semaphore) )
+ {
+ struct payload *p;
+ unsigned int total_cpus;
+
+ p = xsplice_work.data;
+ if ( !get_cpu_maps() )
+ {
+ printk(XENLOG_ERR "%s%s: CPU%u - unable to get cpu_maps lock!\n",
+ XSPLICE, p->name, cpu);
+ per_cpu(work_to_do, cpu) = 0;
+ xsplice_work.data->rc = -EBUSY;
+ xsplice_work.do_work = 0;
+ /*
+ * Do NOT decrement the semaphore - that may cause another CPU
+ * (which may be incrementing it at this exact moment) to assume
+ * the role of master and then needlessly time out (as do_work
+ * is zero).
+ */
+ return;
+ }
+
+ barrier(); /* MUST do it after get_cpu_maps. */
+ total_cpus = num_online_cpus() - 1;
+
+ if ( total_cpus )
+ {
+ dprintk(XENLOG_DEBUG, "%s%s: CPU%u - IPIing the %u CPUs\n",
+ XSPLICE, p->name, cpu, total_cpus);
+ smp_call_function(reschedule_fn, NULL, 0);
+ }
+
+ timeout = xsplice_work.timeout + NOW();
+ if ( xsplice_do_wait(&xsplice_work.semaphore, timeout, total_cpus,
+ "Timed out on CPU semaphore") )
+ goto abort;
+
+ /* "Mask" NMIs. */
+ saved_nmi_callback = set_nmi_callback(mask_nmi_callback);
+
+ /* All CPUs are waiting, now signal to disable IRQs. */
+ xsplice_work.ready = 1;
+ smp_wmb();
+
+ atomic_inc(&xsplice_work.irq_semaphore);
+ if ( !xsplice_do_wait(&xsplice_work.irq_semaphore, timeout, total_cpus,
+ "Timed out on IRQ semaphore") )
+ {
+ local_irq_save(flags);
+ /* Do the patching. */
+ xsplice_do_action();
+ /* To flush out the pipeline. */
+ arch_xsplice_post_action();
+ local_irq_restore(flags);
+ }
+ set_nmi_callback(saved_nmi_callback);
+
+ abort:
+ per_cpu(work_to_do, cpu) = 0;
+ xsplice_work.do_work = 0;
+
+ smp_wmb(); /* Synchronize with waiting CPUs. */
+ ASSERT(local_irq_is_enabled());
+
+ put_cpu_maps();
+
+ printk(XENLOG_INFO "%s%s finished with rc=%d\n", XSPLICE,
+ p->name, p->rc);
+ }
+ else
+ {
+ /* Wait for all CPUs to rendezvous. */
+ while ( xsplice_work.do_work && !xsplice_work.ready )
+ {
+ cpu_relax();
+ smp_rmb();
+ }
+
+ /* Disable IRQs and signal. */
+ local_irq_save(flags);
+ atomic_inc(&xsplice_work.irq_semaphore);
+
+ /* Wait for patching to complete. */
+ while ( xsplice_work.do_work )
+ {
+ cpu_relax();
+ smp_rmb();
+ }
+ /* To flush out the pipeline. */
+ arch_xsplice_post_action();
+ local_irq_restore(flags);
+
+ per_cpu(work_to_do, cpu) = 0;
+ }
+}
+
static int xsplice_action(xen_sysctl_xsplice_action_t *action)
{
struct payload *data;
@@ -452,30 +870,24 @@ static int xsplice_action(xen_sysctl_xsplice_action_t *action)
case XSPLICE_ACTION_REVERT:
if ( data->state == XSPLICE_STATE_APPLIED )
{
- /* No implementation yet. */
- data->state = XSPLICE_STATE_CHECKED;
- data->rc = 0;
- rc = 0;
+ data->rc = -EAGAIN;
+ rc = schedule_work(data, action->cmd, action->timeout);
}
break;
case XSPLICE_ACTION_APPLY:
if ( (data->state == XSPLICE_STATE_CHECKED) )
{
- /* No implementation yet. */
- data->state = XSPLICE_STATE_APPLIED;
- data->rc = 0;
- rc = 0;
+ data->rc = -EAGAIN;
+ rc = schedule_work(data, action->cmd, action->timeout);
}
break;
case XSPLICE_ACTION_REPLACE:
if ( data->state == XSPLICE_STATE_CHECKED )
{
- /* No implementation yet. */
- data->state = XSPLICE_STATE_CHECKED;
- data->rc = 0;
- rc = 0;
+ data->rc = -EAGAIN;
+ rc = schedule_work(data, action->cmd, action->timeout);
}
break;
@@ -541,19 +953,32 @@ static const char *state2str(uint32_t state)
static void xsplice_printall(unsigned char key)
{
struct payload *data;
+ unsigned int i;
spin_lock_recursive(&payload_lock);
list_for_each_entry ( data, &payload_list, list )
- printk(" name=%s state=%s(%d) %p using %zu pages.\n", data->name,
+ {
+ printk(" name=%s state=%s(%d) %p using %zu pages:\n", data->name,
state2str(data->state), data->state, data->payload_address,
data->payload_pages);
+ for ( i = 0; i < data->nfuncs; i++ )
+ {
+ struct xsplice_patch_func *f = &(data->funcs[i]);
+
+ printk(" %s patch 0x%"PRIx64"(%u) with 0x%"PRIx64"(%u)\n",
+ f->name, f->old_addr, f->old_size, f->new_addr, f->new_size);
+
+ if ( i && !(i % 100) )
+ process_pending_softirqs();
+ }
+ }
spin_unlock_recursive(&payload_lock);
}
static int __init xsplice_init(void)
{
+ BUILD_BUG_ON(sizeof(struct xsplice_patch_func) != 64);
+
register_keyhandler('x', xsplice_printall, "print xsplicing info", 1);
arch_xsplice_register_find_space(&find_hole);
return 0;
@@ -4,6 +4,19 @@
#define register_guest_nmi_callback(a) (-ENOSYS)
#define unregister_guest_nmi_callback() (-ENOSYS)
+typedef int (*nmi_callback_t)(const struct cpu_user_regs *regs, int cpu);
+
+/**
+ * set_nmi_callback
+ *
+ * Set a handler for an NMI. Only one handler may be
+ * set. Return the old nmi callback handler.
+ */
+static inline nmi_callback_t set_nmi_callback(nmi_callback_t callback)
+{
+ return NULL;
+}
+
#endif /* ASM_NMI_H */
/*
* Local variables:
@@ -11,12 +11,30 @@ struct xsplice_elf_sec;
struct xsplice_elf_sym;
struct xen_sysctl_xsplice_op;
+#include <xen/elfstructs.h>
+/*
+ * The structure which defines the patching. This is what the hypervisor
+ * expects in the '.xsplice.funcs' section of the ELF file.
+ *
+ * This MUST be in sync with what the tools generate.
+ */
+struct xsplice_patch_func {
+ const char *name; /* Name of the function to be patched. */
+ Elf64_Xword new_addr; /* Address of the replacement code (in the payload). */
+ Elf64_Xword old_addr; /* Address of the hypervisor code to be patched. */
+ Elf64_Word new_size;
+ Elf64_Word old_size;
+ uint8_t undo[8]; /* Filled by the hypervisor with the overwritten bytes. */
+ uint8_t pad[24]; /* Must be zero; pads the structure to 64 bytes. */
+};
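+
+/*
+ * An illustrative entry (names and addresses made up) as a payload might
+ * carry it:
+ *   { .name = "do_foo", .old_addr = 0xffff82d080123450, .old_size = 19,
+ *     .new_addr = 0xffff82d080200000, .new_size = 21, }
+ * with 'undo' and 'pad' zero-filled - prepare_payload() rejects anything else.
+ */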
+
#ifdef CONFIG_XSPLICE
/* Convenience define for printk. */
#define XSPLICE "xsplice: "
int xsplice_op(struct xen_sysctl_xsplice_op *);
+void check_for_xsplice_work(void);
/* Arch hooks. */
int arch_xsplice_verify_elf(const struct xsplice_elf *elf, void *data);
@@ -59,6 +77,17 @@ void arch_xsplice_free_payload(void *va, unsigned int pages, enum va_type);
*/
typedef int (*find_space_t)(ssize_t, unsigned long *, unsigned long *);
void arch_xsplice_register_find_space(find_space_t cb);
+
+/*
+ * These functions are called around the critical region that patches the
+ * live code, for an architecture to make appropriate global state
+ * adjustments (such as clearing CR0.WP on x86).
+ */
+void arch_xsplice_patching_enter(void);
+void arch_xsplice_patching_leave(void);
+
+void arch_xsplice_apply_jmp(struct xsplice_patch_func *func);
+void arch_xsplice_revert_jmp(struct xsplice_patch_func *func);
+void arch_xsplice_post_action(void);
#else
#include <xen/errno.h> /* For -ENOSYS */
@@ -66,7 +95,7 @@ static inline int xsplice_op(struct xen_sysctl_xsplice_op *op)
{
return -ENOSYS;
}
-
+static inline void check_for_xsplice_work(void) { }
#endif /* CONFIG_XSPLICE */
#endif /* __XEN_XSPLICE_H__ */