@@ -3,21 +3,111 @@
*/
#include <xen/errno.h>
+#include <xen/kernel.h>
#include <xen/lib.h>
#include <xen/livepatch_elf.h>
#include <xen/livepatch.h>
+#include <asm/page.h>
+#include <asm/livepatch.h>
+
void arch_livepatch_apply(struct livepatch_func *func)
{
+ uint32_t insn;
+ uint32_t *new_ptr;
+ unsigned int i, len;
+
+ BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE > sizeof(func->opaque));
+ BUILD_BUG_ON(ARCH_PATCH_INSN_SIZE != sizeof(insn));
+
+ ASSERT(vmap_of_xen_text);
+
+ len = livepatch_insn_len(func);
+ if ( !len )
+ return;
+
+ /* Save old ones. */
+ memcpy(func->opaque, func->old_addr, len);
+
+ if ( func->new_addr )
+ {
+ s32 delta;
+
+ /*
+ * PC is current address (old_addr) + 8 bytes. The semantics for an
+ * unconditional branch is to jump to PC + imm32 (offset).
+ *
+ * ARM DDI 0406C.c, see A2.3 (pg 45) and A8.8.18 pg (pg 334,335)
+ *
+ */
+ delta = (s32)func->new_addr - (s32)(func->old_addr + 8);
+
+ /* The arch_livepatch_symbol_ok should have caught it. */
+ ASSERT(delta >= -(s32)ARCH_LIVEPATCH_RANGE &&
+ delta < (s32)ARCH_LIVEPATCH_RANGE);
+
+ /* CPU shifts by two (left) when decoding, so we shift right by two. */
+ delta = delta >> 2;
+ /* Lets not modify the cond. */
+ delta &= 0x00FFFFFF;
+
+ insn = 0xea000000 | delta;
+ }
+ else
+ insn = 0xe1a00000; /* mov r0, r0 */
+
+ new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+ len = len / sizeof(uint32_t);
+
+ /* PATCH! */
+ for ( i = 0; i < len; i++ )
+ *(new_ptr + i) = insn;
+
+ /*
+ * When we upload the payload, it will go through the data cache
+ * (the region is cacheable). Until the data cache is cleaned, the data
+ * may not reach the memory. And in the case the data and instruction cache
+ * are separated, we may read invalid instruction from the memory because
+ * the data cache have not yet synced with the memory. Hence sync it.
+ */
+ if ( func->new_addr )
+ clean_and_invalidate_dcache_va_range(func->new_addr, func->new_size);
+ clean_and_invalidate_dcache_va_range(new_ptr, sizeof (*new_ptr) * len);
}
void arch_livepatch_revert(const struct livepatch_func *func)
{
+ uint32_t *new_ptr;
+ unsigned int len;
+
+ new_ptr = func->old_addr - (void *)_start + vmap_of_xen_text;
+
+ len = livepatch_insn_len(func);
+ memcpy(new_ptr, func->opaque, len);
+
+ clean_and_invalidate_dcache_va_range(new_ptr, len);
}
int arch_livepatch_verify_elf(const struct livepatch_elf *elf)
{
- return -EOPNOTSUPP;
+ const Elf_Ehdr *hdr = elf->hdr;
+
+ if ( hdr->e_machine != EM_ARM ||
+ hdr->e_ident[EI_CLASS] != ELFCLASS32 )
+ {
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF Machine type!\n",
+ elf->name);
+ return -EOPNOTSUPP;
+ }
+
+ if ( (hdr->e_flags & EF_ARM_EABI_MASK) != EF_ARM_EABI_VER5 )
+ {
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Unsupported ELF EABI(%x)!\n",
+ elf->name, hdr->e_flags);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf,
@@ -33,11 +123,193 @@ bool arch_livepatch_symbol_deny(const struct livepatch_elf *elf,
return false;
}
+static s32 get_addend(unsigned char type, void *dest)
+{
+ s32 addend = 0;
+
+ switch ( type ) {
+ case R_ARM_NONE:
+ /* ignore */
+ break;
+
+ case R_ARM_ABS32:
+ addend = *(u32 *)dest;
+ break;
+
+ case R_ARM_REL32:
+ addend = *(u32 *)dest;
+ break;
+
+ case R_ARM_MOVW_ABS_NC:
+ case R_ARM_MOVT_ABS:
+ addend = (*(u32 *)dest & 0x00000FFF);
+ addend |= (*(u32 *)dest & 0x000F0000) >> 4;
+ /* Addend is to sign-extend ([19:16],[11:0]). */
+ addend = (s16)addend;
+ break;
+
+ case R_ARM_CALL:
+ case R_ARM_JUMP24:
+ /* Addend = sign_extend (insn[23:0]) << 2 */
+ addend = ((*(u32 *)dest & 0xFFFFFF) ^ 0x800000) - 0x800000;
+ addend = addend << 2;
+ break;
+ }
+
+ return addend;
+}
+
+static int perform_rel(unsigned char type, void *dest, uint32_t val, s32 addend)
+{
+
+ switch ( type ) {
+ case R_ARM_NONE:
+ /* ignore */
+ break;
+
+ case R_ARM_ABS32: /* (S + A) | T */
+ *(u32 *)dest = (val + addend);
+ break;
+
+ case R_ARM_REL32: /* ((S + A) | T) - P */
+ *(u32 *)dest = (val + addend) - (uint32_t)dest;
+ break;
+
+ case R_ARM_MOVW_ABS_NC: /* S + A */
+ case R_ARM_MOVT_ABS: /* S + A */
+ /* Clear addend if needed . */
+ if ( addend )
+ *(u32 *)dest &= 0xFFF0F000;
+
+ if ( type == R_ARM_MOVT_ABS )
+ {
+ /*
+ * Almost the same as MOVW except it uses the 16 bit
+ * high value. Putting it in insn requires shifting right by
+ * 16-bit (as we only have 16-bit for imm).
+ */
+ val &= 0xFFFF0000; /* ResultMask */
+ val = val >> 16;
+ }
+ else
+ {
+ /* MOVW loads 16 bits into the bottom half of a register. */
+ val &= 0xFFFF;
+ }
+ /* [11:0] = Result_Mask(X) & 0xFFF,[19:16] = Result_Mask(X) >> 12 */
+ *(u32 *)dest |= val & 0xFFF;
+ *(u32 *)dest |= (val >> 12) << 16;
+ break;
+
+ case R_ARM_CALL:
+ case R_ARM_JUMP24: /* (S + A) - P */
+ /* Clear the old addend. */
+ if ( addend )
+ *(u32 *)dest &= 0xFF000000;
+
+ val += addend - (uint32_t)dest;
+
+ /*
+ * arch_livepatch_verify_distance can't account for the addend so we have
+ * to do the check here as well.
+ */
+ if ( (s32)val < -(s32)ARCH_LIVEPATCH_RANGE ||
+ (s32)val >= (s32)ARCH_LIVEPATCH_RANGE )
+ return -EOVERFLOW;
+
+ /* CPU always shifts insn by two, so complement it. */
+ val = val >> 2;
+ val &= 0x00FFFFFF;
+ *(u32 *)dest |= (uint32_t)val;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int arch_livepatch_perform(struct livepatch_elf *elf,
+ const struct livepatch_elf_sec *base,
+ const struct livepatch_elf_sec *rela,
+ bool use_rela)
+{
+ const Elf_RelA *r_a;
+ const Elf_Rel *r;
+ unsigned int symndx, i;
+ uint32_t val;
+ void *dest;
+ int rc = 0;
+
+ for ( i = 0; i < (rela->sec->sh_size / rela->sec->sh_entsize); i++ )
+ {
+ unsigned char type;
+ s32 addend = 0;
+
+ if ( use_rela )
+ {
+ r_a = rela->data + i * rela->sec->sh_entsize;
+ symndx = ELF32_R_SYM(r_a->r_info);
+ type = ELF32_R_TYPE(r_a->r_info);
+ dest = base->load_addr + r_a->r_offset; /* P */
+ addend = r_a->r_addend;
+ }
+ else
+ {
+ r = rela->data + i * rela->sec->sh_entsize;
+ symndx = ELF32_R_SYM(r->r_info);
+ type = ELF32_R_TYPE(r->r_info);
+ dest = base->load_addr + r->r_offset; /* P */
+ }
+
+ if ( symndx >= elf->nsym )
+ {
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Relative symbol wants symbol@%u which is past end!\n",
+ elf->name, symndx);
+ return -EINVAL;
+ }
+
+ if ( !use_rela )
+ addend = get_addend(type, dest);
+
+ val = elf->sym[symndx].sym->st_value; /* S */
+
+ rc = perform_rel(type, dest, val, addend);
+ switch ( rc ) {
+ case -EOVERFLOW:
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Overflow in relocation %u in %s for %s!\n",
+ elf->name, i, rela->name, base->name);
+ break;
+
+ case -EOPNOTSUPP:
+ dprintk(XENLOG_ERR, LIVEPATCH "%s: Unhandled relocation #%x\n",
+ elf->name, type);
+ break;
+
+ default:
+ break;
+ }
+
+ if ( rc )
+ break;
+ }
+
+ return rc;
+}
+
+int arch_livepatch_perform_rel(struct livepatch_elf *elf,
+ const struct livepatch_elf_sec *base,
+ const struct livepatch_elf_sec *rela)
+{
+ return arch_livepatch_perform(elf, base, rela, false);
+}
+
int arch_livepatch_perform_rela(struct livepatch_elf *elf,
const struct livepatch_elf_sec *base,
const struct livepatch_elf_sec *rela)
{
- return -ENOSYS;
+ return arch_livepatch_perform(elf, base, rela, true);
}
/*
@@ -241,6 +241,13 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *dest, u64 val,
return 0;
}
+int arch_livepatch_perform_rel(struct livepatch_elf *elf,
+ const struct livepatch_elf_sec *base,
+ const struct livepatch_elf_sec *rela)
+{
+ return -ENOSYS;
+}
+
int arch_livepatch_perform_rela(struct livepatch_elf *elf,
const struct livepatch_elf_sec *base,
const struct livepatch_elf_sec *rela)
@@ -118,13 +118,6 @@ bool arch_livepatch_symbol_ok(const struct livepatch_elf *elf,
return true;
}
-int arch_livepatch_perform_rel(struct livepatch_elf *elf,
- const struct livepatch_elf_sec *base,
- const struct livepatch_elf_sec *rela)
-{
- return -ENOSYS;
-}
-
int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type)
{
unsigned long start = (unsigned long)va;
@@ -217,7 +217,7 @@ config CRYPTO
config LIVEPATCH
bool "Live patching support (TECH PREVIEW)"
default n
- depends on !ARM_32 && HAS_BUILD_ID = "y"
+ depends on HAS_BUILD_ID = "y"
---help---
Allows a running Xen hypervisor to be dynamically patched using
binary patches without rebooting. This is primarily used to binarily
@@ -103,6 +103,15 @@ typedef uint64_t Elf64_Xword;
(ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
(ehdr).e_ident[EI_MAG3] == ELFMAG3)
+/* e_flags */
+#define EF_ARM_EABI_MASK 0xff000000
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
/* ELF Header */
typedef struct elfhdr {
unsigned char e_ident[EI_NIDENT]; /* ELF Identification */
@@ -364,9 +373,22 @@ typedef struct {
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
/*
+ * ARM32 relocation types. See
+ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044f/IHI0044F_aaelf.pdf
* S - address of symbol.
- * A - addend for relocation (r_addend)
+ * A - addend for relocation (r_addend or need to extract from insn)
+ * P - address of the dest being relocated (derived from r_offset)
+ */
+#define R_ARM_NONE 0
+#define R_ARM_ABS32 2 /* Direct 32-bit. S+A */
+#define R_ARM_REL32 3 /* PC relative. S+A */
+#define R_ARM_CALL 28 /* SignExtend([23:0]) << 2. S+A-P */
+#define R_ARM_JUMP24 29 /* Same as R_ARM_CALL */
+#define R_ARM_MOVW_ABS_NC 43 /* SignExtend([19:16],[11:0])&0xFFFF, S+A */
+#define R_ARM_MOVT_ABS 44 /* SignExtend([19:16],[11:0])&0xFFFF0000 */
+ /* >> 16, S+A. */
+
+/*
* NC - No check for overflow.
*
* The defines also use _PREL for PC-relative address, and _NC is No Check.
@@ -1,8 +1,6 @@
.PHONY: tests
tests:
-ifneq ($(XEN_TARGET_ARCH),arm32)
$(MAKE) -f $(BASEDIR)/Rules.mk -C livepatch livepatch
-endif
.PHONY: clean
clean::
@@ -6,6 +6,9 @@ endif
ifeq ($(XEN_TARGET_ARCH),arm64)
OBJCOPY_MAGIC := -I binary -O elf64-littleaarch64 -B aarch64
endif
+ifeq ($(XEN_TARGET_ARCH),arm32)
+OBJCOPY_MAGIC := -I binary -O elf32-littlearm -B arm
+endif
CODE_ADDR=$(shell nm --defined $(1) | grep $(2) | awk '{print "0x"$$1}')
CODE_SZ=$(shell nm --defined -S $(1) | grep $(2) | awk '{ print "0x"$$2}')