@@ -40,6 +40,7 @@ obj-y += device.o
obj-y += decode.o
obj-y += processor.o
obj-y += smc.o
+obj-$(CONFIG_XSPLICE) += xsplice.o
#obj-bin-y += ....o
new file mode 100644
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Citrix Systems R&D Ltd.
+ */
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/xsplice_elf.h>
+#include <xen/xsplice.h>
+
+int arch_xsplice_verify_elf(const struct xsplice_elf *elf, void *data)
+{
+ return -ENOSYS;
+}
+
+int arch_xsplice_perform_rel(struct xsplice_elf *elf,
+ const struct xsplice_elf_sec *base,
+ const struct xsplice_elf_sec *rela)
+{
+ return -ENOSYS;
+}
+
+int arch_xsplice_perform_rela(struct xsplice_elf *elf,
+ const struct xsplice_elf_sec *base,
+ const struct xsplice_elf_sec *rela)
+{
+ return -ENOSYS;
+}
+
+void *arch_xsplice_alloc_payload(unsigned int pages, enum va_type type,
+ mfn_t **mfn)
+{
+ return NULL;
+}
+
+int arch_xsplice_secure(void *va, unsigned int pages, enum va_type type,
+ const mfn_t *mfn)
+{
+ return -ENOSYS;
+}
+
+void arch_xsplice_register_find_space(find_space_t cb)
+{
+}
+
+void arch_xsplice_free_payload(void *va, unsigned int pages, enum va_type type)
+{
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -63,6 +63,7 @@ obj-y += vm_event.o
obj-y += xstate.o
obj-$(crash_debug) += gdbstub.o
+obj-$(CONFIG_XSPLICE) += xsplice.o
x86_emulate.o: x86_emulate/x86_emulate.c x86_emulate/x86_emulate.h
@@ -100,6 +100,9 @@ unsigned long __read_mostly xen_phys_start;
unsigned long __read_mostly xen_virt_end;
+unsigned long __read_mostly avail_virt_start;
+unsigned long __read_mostly avail_virt_end;
+
DEFINE_PER_CPU(struct tss_struct, init_tss);
char __section(".bss.stack_aligned") cpu0_stack[STACK_SIZE];
@@ -1206,6 +1209,10 @@ void __init noreturn __start_xen(unsigned long mbi_p)
~((1UL << L2_PAGETABLE_SHIFT) - 1);
destroy_xen_mappings(xen_virt_end, XEN_VIRT_START + BOOTSTRAP_MAP_BASE);
+ avail_virt_start = xen_virt_end;
+ avail_virt_end = XEN_VIRT_END - NR_CPUS * PAGE_SIZE;
+ BUG_ON(avail_virt_end <= avail_virt_start);
+
nr_pages = 0;
for ( i = 0; i < e820.nr_map; i++ )
if ( e820.map[i].type == E820_RAM )
new file mode 100644
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 Citrix Systems R&D Ltd.
+ */
+
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/mm.h>
+#include <xen/pfn.h>
+#include <xen/vmap.h>
+#include <xen/xsplice_elf.h>
+#include <xen/xsplice.h>
+
+int arch_xsplice_verify_elf(const struct xsplice_elf *elf, void *data)
+{
+    Elf_Ehdr *hdr = data;
+
+    /* Validate the length before reading any header fields (OOB read). */
+    if ( elf->len < sizeof(*hdr) || !IS_ELF(*hdr) )
+    {
+        printk(XENLOG_ERR "%s%s: Not an ELF payload!\n", XSPLICE, elf->name);
+        return -EINVAL;
+    }
+
+    /* Only 64-bit LSB SysV relocatable objects without phdrs are valid. */
+    if ( hdr->e_ident[EI_CLASS] != ELFCLASS64 ||
+         hdr->e_ident[EI_DATA] != ELFDATA2LSB ||
+         hdr->e_ident[EI_OSABI] != ELFOSABI_SYSV ||
+         hdr->e_machine != EM_X86_64 ||
+         hdr->e_type != ET_REL ||
+         hdr->e_phnum != 0 )
+    {
+        printk(XENLOG_ERR "%s%s: Invalid ELF payload!\n", XSPLICE, elf->name);
+        return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
+int arch_xsplice_perform_rel(struct xsplice_elf *elf,
+ const struct xsplice_elf_sec *base,
+ const struct xsplice_elf_sec *rela)
+{
+ dprintk(XENLOG_ERR, "%s%s: SHR_REL relocation unsupported\n",
+ XSPLICE, elf->name);
+ return -ENOSYS;
+}
+
+int arch_xsplice_perform_rela(struct xsplice_elf *elf,
+                              const struct xsplice_elf_sec *base,
+                              const struct xsplice_elf_sec *rela)
+{
+    Elf_RelA *r;
+    unsigned int symndx, i;
+    uint64_t val;
+    uint8_t *dest;
+
+    if ( !rela->sec->sh_entsize || !rela->sec->sh_size ||
+         rela->sec->sh_entsize != sizeof(Elf_RelA) )
+    {
+        dprintk(XENLOG_DEBUG, "%s%s: Section relative header is corrupted!\n",
+                XSPLICE, elf->name);
+        return -EINVAL;
+    }
+    for ( i = 0; i < (rela->sec->sh_size / rela->sec->sh_entsize); i++ )
+    {
+        r = (Elf_RelA *)(rela->data + i * rela->sec->sh_entsize);
+        if ( (unsigned long)(r + 1) > (unsigned long)elf->hdr + elf->len )
+        {
+            dprintk(XENLOG_DEBUG, "%s%s: Relative section %u is past end!\n",
+                    XSPLICE, elf->name, i);
+            return -EINVAL;
+        }
+        symndx = ELF64_R_SYM(r->r_info);
+        if ( symndx >= elf->nsym ) /* Valid indices are [0, elf->nsym). */
+        {
+            dprintk(XENLOG_DEBUG, "%s%s: Relative symbol wants symbol@%u which is past end!\n",
+                    XSPLICE, elf->name, symndx);
+            return -EINVAL;
+        }
+        dest = base->load_addr + r->r_offset;
+        val = r->r_addend + elf->sym[symndx].sym->st_value;
+
+        switch ( ELF64_R_TYPE(r->r_info) )
+        {
+        case R_X86_64_NONE:
+            break;
+        case R_X86_64_64:
+            *(uint64_t *)dest = val;
+            break;
+        case R_X86_64_PLT32:
+            /*
+             * Xen uses -fpic which normally uses PLT relocations
+             * except that it sets visibility to hidden which means
+             * that they are not used.  However, when gcc cannot
+             * inline memcpy it emits memcpy with default visibility
+             * which then creates a PLT relocation.  It can just be
+             * treated the same as R_X86_64_PC32.
+             */
+            /* Fall through */
+        case R_X86_64_PC32:
+            *(uint32_t *)dest = val - (uint64_t)dest;
+            break;
+        default:
+            printk(XENLOG_ERR "%s%s: Unhandled relocation %lu\n",
+                   XSPLICE, elf->name, ELF64_R_TYPE(r->r_info));
+            return -EINVAL;
+        }
+    }
+
+    return 0;
+}
+
+static find_space_t find_space_fnc = NULL;
+
+void arch_xsplice_register_find_space(find_space_t cb)
+{
+ ASSERT(!find_space_fnc);
+
+ find_space_fnc = cb;
+}
+
+static void *xsplice_map_rx(const mfn_t *mfn, unsigned int pages)
+{
+    unsigned long cur;
+    unsigned long start, end;
+
+    start = (unsigned long)avail_virt_start;
+    end = start + pages * PAGE_SIZE;
+
+    ASSERT(find_space_fnc);
+
+    if ( (find_space_fnc)(pages, &start, &end) )
+        return NULL;
+
+    if ( end >= avail_virt_end )
+        return NULL;
+
+    for ( cur = start; pages--; ++mfn, cur += PAGE_SIZE )
+    {
+        /*
+         * We would like to map RX, but we need to copy data in it first.
+         * See arch_xsplice_secure for how we lockdown.
+         */
+        if ( map_pages_to_xen(cur, mfn_x(*mfn), 1, PAGE_HYPERVISOR_RWX) )
+        {
+            if ( cur != start )
+                destroy_xen_mappings(start, cur);
+            return NULL;
+        }
+    }
+    return (void *)start;
+}
+
+/*
+ * The function prepares an xSplice payload by allocating space which
+ * then can be used for loading the allocated sections, resolving symbols,
+ * performing relocations, etc.
+ */
+void *arch_xsplice_alloc_payload(unsigned int pages, enum va_type type,
+ mfn_t **mfn)
+{
+ vmap_cb_t cb = NULL;
+ unsigned int i;
+ void *p;
+
+    ASSERT(pages); /* Number of pages, not bytes. */
+ ASSERT(mfn && !*mfn);
+
+ /*
+ * Initially the pages allocated must have W otherwise we can't
+ * put anything in them.
+ */
+ if ( type == XSPLICE_VA_RX )
+ cb = xsplice_map_rx;
+ else
+ cb = vmap;
+
+ *mfn = NULL;
+ /*
+ * We let the vmalloc allocate the pages we need, and use
+ * our callback.
+ */
+ p = vmalloc_cb(pages * PAGE_SIZE, cb, mfn);
+ WARN_ON(!p);
+ if ( !p )
+ return NULL;
+ for ( i = 0; i < pages; i++ )
+ clear_page(p + (i * PAGE_SIZE) );
+
+ /* Note that we do not free mfn. The caller is responsible for that. */
+ return p;
+}
+
+static void arch_xsplice_vfree_cb(void *va, unsigned int pages)
+{
+ unsigned long addr = (unsigned long)va;
+
+ destroy_xen_mappings(addr, addr + pages * PAGE_SIZE);
+}
+
+/*
+ * Once symbol resolution, relocation processing, etc. are complete,
+ * we secure the memory by putting in the proper page table attributes
+ * for the desired type.
+ *
+ */
+int arch_xsplice_secure(void *va, unsigned int pages, enum va_type type,
+                        const mfn_t *mfn)
+{
+    unsigned long cur;
+    unsigned long start = (unsigned long)va;
+
+    ASSERT(va);
+    ASSERT(pages);
+
+    if ( type != XSPLICE_VA_RX )
+        return 0;
+
+    /*
+     * We could walk the pagetable and do the pagetable manipulations
+     * (strip the _PAGE_RW), which would mean also not needing the mfn
+     * array, but there is no generic code for this yet (TODO).
+     *
+     * For right now tear down the pagetables and recreate them.
+     */
+    arch_xsplice_vfree_cb(va, pages);
+
+    for ( cur = start; pages--; ++mfn, cur += PAGE_SIZE )
+    {
+        if ( map_pages_to_xen(cur, mfn_x(*mfn), 1, PAGE_HYPERVISOR_RX) )
+        {
+            if ( cur != start )
+                destroy_xen_mappings(start, cur);
+            return -EINVAL;
+        }
+    }
+    return 0;
+}
+
+void arch_xsplice_free_payload(void *va, unsigned int pages, enum va_type type)
+{
+ if ( type == XSPLICE_VA_RX )
+ vfree_cb(va, pages, arch_xsplice_vfree_cb);
+ else
+ vfree(va);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -12,6 +12,7 @@
#include <xen/smp.h>
#include <xen/spinlock.h>
#include <xen/vmap.h>
+#include <xen/xsplice_elf.h>
#include <xen/xsplice.h>
#include <asm/event.h>
@@ -27,6 +28,9 @@ struct payload {
uint32_t state; /* One of the XSPLICE_STATE_*. */
int32_t rc; /* 0 or -XEN_EXX. */
struct list_head list; /* Linked to 'payload_list'. */
+ void *payload_address; /* Virtual address mapped. */
+ size_t payload_pages; /* Nr of the pages. */
+ mfn_t *mfn; /* Array of MFNs of the pages. */
char name[XEN_XSPLICE_NAME_SIZE + 1];/* Name of it. */
};
@@ -92,6 +96,136 @@ static int find_payload(const xen_xsplice_name_t *name, struct payload **f)
}
/*
+ * Functions related to XEN_SYSCTL_XSPLICE_UPLOAD (see xsplice_upload), and
+ * freeing payload (XEN_SYSCTL_XSPLICE_ACTION:XSPLICE_ACTION_UNLOAD).
+ */
+
+static void free_payload_data(struct payload *payload)
+{
+ /* Set to zero until "move_payload". */
+ if ( !payload->payload_address )
+ return;
+
+ xfree(payload->mfn);
+ payload->mfn = NULL;
+ arch_xsplice_free_payload(payload->payload_address,
+ payload->payload_pages, XSPLICE_VA_RX);
+
+ payload->payload_address = NULL;
+ payload->payload_pages = 0;
+}
+
+static void calc_section(struct xsplice_elf_sec *sec, size_t *size)
+{
+ size_t align_size = ROUNDUP(*size, sec->sec->sh_addralign);
+ sec->sec->sh_entsize = align_size;
+ *size = sec->sec->sh_size + align_size;
+}
+
+static int find_hole(ssize_t pages, unsigned long *hole_start,
+ unsigned long *hole_end)
+{
+ struct payload *data, *data2;
+
+ spin_lock_recursive(&payload_lock);
+ list_for_each_entry ( data, &payload_list, list )
+ {
+ list_for_each_entry ( data2, &payload_list, list )
+ {
+ unsigned long start, end;
+
+ start = (unsigned long)data2->payload_address;
+ end = start + data2->payload_pages * PAGE_SIZE;
+ if ( *hole_end > start && *hole_start < end )
+ {
+ *hole_start = end;
+ *hole_end = end + pages * PAGE_SIZE;
+ break;
+ }
+ }
+ if ( &data2->list == &payload_list )
+ break;
+ }
+ spin_unlock_recursive(&payload_lock);
+
+ return 0;
+}
+
+static int move_payload(struct payload *payload, struct xsplice_elf *elf)
+{
+ uint8_t *buf;
+ unsigned int i;
+ size_t size = 0;
+
+ /* Compute text regions. */
+ for ( i = 0; i < elf->hdr->e_shnum; i++ )
+ {
+ if ( (elf->sec[i].sec->sh_flags & (SHF_ALLOC|SHF_EXECINSTR)) ==
+ (SHF_ALLOC|SHF_EXECINSTR) )
+ calc_section(&elf->sec[i], &size);
+ }
+
+ /* Compute rw data. */
+ for ( i = 0; i < elf->hdr->e_shnum; i++ )
+ {
+ if ( (elf->sec[i].sec->sh_flags & SHF_ALLOC) &&
+ !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
+ (elf->sec[i].sec->sh_flags & SHF_WRITE) )
+ calc_section(&elf->sec[i], &size);
+ }
+
+ /* Compute ro data. */
+ for ( i = 0; i < elf->hdr->e_shnum; i++ )
+ {
+ if ( (elf->sec[i].sec->sh_flags & SHF_ALLOC) &&
+ !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
+ !(elf->sec[i].sec->sh_flags & SHF_WRITE) )
+ calc_section(&elf->sec[i], &size);
+ }
+
+ size = PFN_UP(size);
+ buf = arch_xsplice_alloc_payload(size, XSPLICE_VA_RX, &payload->mfn);
+ if ( !buf ) {
+ printk(XENLOG_ERR "%s%s: Could not allocate memory for payload!\n",
+ XSPLICE, elf->name);
+ return -ENOMEM;
+ }
+ payload->payload_address = buf;
+ payload->payload_pages = size;
+
+ for ( i = 0; i < elf->hdr->e_shnum; i++ )
+ {
+ if ( elf->sec[i].sec->sh_flags & SHF_ALLOC )
+ {
+ elf->sec[i].load_addr = buf + elf->sec[i].sec->sh_entsize;
+
+ /* Don't copy NOBITS - such as BSS. */
+ if ( elf->sec[i].sec->sh_type != SHT_NOBITS )
+ {
+ memcpy(elf->sec[i].load_addr, elf->sec[i].data,
+ elf->sec[i].sec->sh_size);
+ dprintk(XENLOG_DEBUG, "%s%s: Loaded %s at 0x%p\n", XSPLICE,
+ elf->name, elf->sec[i].name, elf->sec[i].load_addr);
+ }
+ }
+ }
+ return 0;
+}
+
+static int secure_payload(struct payload *payload, struct xsplice_elf *elf)
+{
+ int rc;
+
+ ASSERT(payload->mfn);
+
+ rc = arch_xsplice_secure(payload->payload_address, payload->payload_pages,
+ XSPLICE_VA_RX, payload->mfn);
+ xfree(payload->mfn);
+ payload->mfn = NULL;
+ return rc;
+}
+
+/*
* We MUST be holding the payload_lock spinlock.
*/
static void free_payload(struct payload *data)
@@ -100,9 +234,51 @@ static void free_payload(struct payload *data)
list_del(&data->list);
payload_cnt--;
payload_version++;
+ free_payload_data(data);
xfree(data);
}
+static int load_payload_data(struct payload *payload, void *raw, ssize_t len)
+{
+ struct xsplice_elf elf;
+ int rc = 0;
+
+ memset(&elf, 0, sizeof(elf));
+ elf.name = payload->name;
+ elf.len = len;
+
+ rc = arch_xsplice_verify_elf(&elf, raw);
+ if ( rc )
+ return rc;
+
+ rc = xsplice_elf_load(&elf, raw);
+ if ( rc )
+ goto out;
+
+ rc = move_payload(payload, &elf);
+ if ( rc )
+ goto out;
+
+ rc = xsplice_elf_resolve_symbols(&elf);
+ if ( rc )
+ goto out;
+
+ rc = xsplice_elf_perform_relocs(&elf);
+ if ( rc )
+ goto out;
+
+ rc = secure_payload(payload, &elf);
+
+ out:
+ if ( rc )
+ free_payload_data(payload);
+
+ /* Free our temporary data structure. */
+ xsplice_elf_free(&elf);
+
+ return rc;
+}
+
static int xsplice_upload(xen_sysctl_xsplice_upload_t *upload)
{
struct payload *data = NULL;
@@ -137,6 +313,10 @@ static int xsplice_upload(xen_sysctl_xsplice_upload_t *upload)
if ( copy_from_guest(raw_data, upload->payload, upload->size) )
goto out;
+ rc = load_payload_data(data, raw_data, upload->size);
+ if ( rc )
+ goto out;
+
data->state = XSPLICE_STATE_CHECKED;
data->rc = 0;
INIT_LIST_HEAD(&data->list);
@@ -365,8 +545,9 @@ static void xsplice_printall(unsigned char key)
spin_lock_recursive(&payload_lock);
list_for_each_entry ( data, &payload_list, list )
- printk(" name=%s state=%s(%d)\n", data->name,
- state2str(data->state), data->state);
+ printk(" name=%s state=%s(%d) %p using %zu pages.\n", data->name,
+ state2str(data->state), data->state, data->payload_address,
+ data->payload_pages);
spin_unlock_recursive(&payload_lock);
}
@@ -374,6 +555,7 @@ static void xsplice_printall(unsigned char key)
static int __init xsplice_init(void)
{
register_keyhandler('x', xsplice_printall, "print xsplicing info", 1);
+ arch_xsplice_register_find_space(&find_hole);
return 0;
}
__initcall(xsplice_init);
@@ -206,6 +206,91 @@ static int elf_get_sym(struct xsplice_elf *elf, const void *data)
return 0;
}
+int xsplice_elf_resolve_symbols(struct xsplice_elf *elf)
+{
+ unsigned int i;
+
+ /*
+ * The first entry of an ELF symbol table is the "undefined symbol index".
+ * aka reserved so we skip it.
+ */
+ ASSERT( elf->sym );
+ for ( i = 1; i < elf->nsym; i++ )
+ {
+ switch ( elf->sym[i].sym->st_shndx )
+ {
+ case SHN_COMMON:
+ printk(XENLOG_ERR "%s%s: Unexpected common symbol: %s\n",
+ XSPLICE, elf->name, elf->sym[i].name);
+ return -EINVAL;
+ break;
+ case SHN_UNDEF:
+ printk(XENLOG_ERR "%s%s: Unknown symbol: %s\n",
+ XSPLICE, elf->name, elf->sym[i].name);
+ return -ENOENT;
+ break;
+ case SHN_ABS:
+ dprintk(XENLOG_DEBUG, "%s%s: Absolute symbol: %s => 0x%"PRIx64"\n",
+ XSPLICE, elf->name, elf->sym[i].name,
+ elf->sym[i].sym->st_value);
+ break;
+ default:
+ if ( elf->sec[elf->sym[i].sym->st_shndx].sec->sh_flags & SHF_ALLOC )
+ {
+ elf->sym[i].sym->st_value +=
+ (unsigned long)elf->sec[elf->sym[i].sym->st_shndx].load_addr;
+ if ( elf->sym[i].name )
+ printk(XENLOG_DEBUG "%s%s: Symbol resolved: %s => 0x%"PRIx64"\n",
+ XSPLICE, elf->name, elf->sym[i].name,
+ elf->sym[i].sym->st_value);
+ }
+ }
+ }
+
+ return 0;
+}
+
+int xsplice_elf_perform_relocs(struct xsplice_elf *elf)
+{
+ struct xsplice_elf_sec *rela, *base;
+ unsigned int i;
+ int rc;
+
+ /*
+ * The first entry of an ELF symbol table is the "undefined symbol index".
+ * aka reserved so we skip it.
+ */
+ ASSERT( elf->sym );
+ for ( i = 1; i < elf->hdr->e_shnum; i++ )
+ {
+ rela = &elf->sec[i];
+
+ if ( (rela->sec->sh_type != SHT_RELA ) &&
+ (rela->sec->sh_type != SHT_REL ) )
+ continue;
+
+ /* Is it a valid relocation section? */
+ if ( rela->sec->sh_info >= elf->hdr->e_shnum )
+ continue;
+
+ base = &elf->sec[rela->sec->sh_info];
+
+ /* Don't relocate non-allocated sections. */
+ if ( !(base->sec->sh_flags & SHF_ALLOC) )
+ continue;
+
+ if ( elf->sec[i].sec->sh_type == SHT_RELA )
+ rc = arch_xsplice_perform_rela(elf, base, rela);
+ else /* SHT_REL */
+ rc = arch_xsplice_perform_rel(elf, base, rela);
+
+ if ( rc )
+ return rc;
+ }
+
+ return 0;
+}
+
static int xsplice_header_check(const struct xsplice_elf *elf)
{
if ( sizeof(*elf->hdr) >= elf->len )
@@ -38,6 +38,8 @@
#include <xen/pdx.h>
extern unsigned long xen_virt_end;
+extern unsigned long avail_virt_start;
+extern unsigned long avail_virt_end;
#define spage_to_pdx(spg) (((spg) - spage_table)<<(SUPERPAGE_SHIFT-PAGE_SHIFT))
#define pdx_to_spage(pdx) (spage_table + ((pdx)>>(SUPERPAGE_SHIFT-PAGE_SHIFT)))
@@ -6,6 +6,9 @@
#ifndef __XEN_XSPLICE_H__
#define __XEN_XSPLICE_H__
+struct xsplice_elf;
+struct xsplice_elf_sec;
+struct xsplice_elf_sym;
struct xen_sysctl_xsplice_op;
#ifdef CONFIG_XSPLICE
@@ -15,6 +18,47 @@ struct xen_sysctl_xsplice_op;
int xsplice_op(struct xen_sysctl_xsplice_op *);
+/* Arch hooks. */
+int arch_xsplice_verify_elf(const struct xsplice_elf *elf, void *data);
+int arch_xsplice_perform_rel(struct xsplice_elf *elf,
+ const struct xsplice_elf_sec *base,
+ const struct xsplice_elf_sec *rela);
+int arch_xsplice_perform_rela(struct xsplice_elf *elf,
+ const struct xsplice_elf_sec *base,
+ const struct xsplice_elf_sec *rela);
+enum va_type {
+ XSPLICE_VA_RX, /* .text */
+ XSPLICE_VA_RW, /* Everything else. */
+};
+
+#include <xen/mm.h>
+void *arch_xsplice_alloc_payload(unsigned int pages, enum va_type, mfn_t **mfn);
+
+/*
+ * Function to secure the allocate pages (from arch_xsplice_alloc_payload)
+ * with the right page permissions.
+ */
+int arch_xsplice_secure(void *va, unsigned int pages, enum va_type type,
+ const mfn_t *mfn);
+
+void arch_xsplice_free_payload(void *va, unsigned int pages, enum va_type);
+
+/*
+ * Callback to find available virtual address space in which the
+ * payload could be put in.
+ *
+ * The arguments are:
+ * - The size of the payload in pages.
+ * - The starting virtual address to search. To be updated by
+ * callback if space found.
+ * - The ending virtual address to search. To be updated by
+ * callback if space found.
+ *
+ * The return value is zero if search was done. -EXX values
+ * if errors were encountered.
+ */
+typedef int (*find_space_t)(ssize_t, unsigned long *, unsigned long *);
+void arch_xsplice_register_find_space(find_space_t cb);
#else
#include <xen/errno.h> /* For -ENOSYS */
@@ -15,6 +15,8 @@ struct xsplice_elf_sec {
elf_resolve_section_names. */
const void *data; /* Pointer to the section (done by
elf_resolve_sections). */
+ uint8_t *load_addr; /* A pointer to the allocated destination.
+ Done by load_payload_data. */
};
struct xsplice_elf_sym {
@@ -38,6 +40,9 @@ struct xsplice_elf_sec *xsplice_elf_sec_by_name(const struct xsplice_elf *elf,
int xsplice_elf_load(struct xsplice_elf *elf, void *data);
void xsplice_elf_free(struct xsplice_elf *elf);
+int xsplice_elf_resolve_symbols(struct xsplice_elf *elf);
+int xsplice_elf_perform_relocs(struct xsplice_elf *elf);
+
#endif /* __XEN_XSPLICE_ELF_H__ */
/*