@@ -97,6 +97,8 @@ static inline void crash_post_resume(void) {}
* kernel, or purgatory entry address).
* kern_arg0 first argument to kernel is its dtb address. The other
* arguments are currently unused, and must be set to 0
+ * trans_table: idmap for the source and destination pages, as well as
+ * for the relocation text.
*/
struct kern_reloc_arg {
unsigned long head;
@@ -105,6 +107,7 @@ struct kern_reloc_arg {
unsigned long kern_arg1;
unsigned long kern_arg2;
unsigned long kern_arg3;
+ unsigned long trans_table;
};
#define ARCH_HAS_KIMAGE_ARCH
@@ -135,6 +135,7 @@ int main(void)
DEFINE(KRELOC_KERN_ARG1, offsetof(struct kern_reloc_arg, kern_arg1));
DEFINE(KRELOC_KERN_ARG2, offsetof(struct kern_reloc_arg, kern_arg2));
DEFINE(KRELOC_KERN_ARG3, offsetof(struct kern_reloc_arg, kern_arg3));
+ DEFINE(KRELOC_TRANS_TABLE, offsetof(struct kern_reloc_arg, trans_table));
#endif
return 0;
}
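
For reference, the DEFINE() entry above lands in asm-offsets.h as a plain
constant, so the relocation assembly can address the new field by offset.
Below is a minimal standalone sketch of the same offsetof() pattern, using
a mirror struct with a hypothetical name (not the kernel build machinery
itself):

	#include <stdio.h>
	#include <stddef.h>

	/* Mirror of struct kern_reloc_arg, for illustration only */
	struct kern_reloc_arg_example {
		unsigned long head;
		unsigned long entry_addr;
		unsigned long kern_arg0;
		unsigned long kern_arg1;
		unsigned long kern_arg2;
		unsigned long kern_arg3;
		unsigned long trans_table;
	};

	int main(void)
	{
		/* KRELOC_TRANS_TABLE would expand to this value (48 on LP64) */
		printf("KRELOC_TRANS_TABLE = %zu\n",
		       offsetof(struct kern_reloc_arg_example, trans_table));
		return 0;
	}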
@@ -20,6 +20,7 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/trans_table.h>
#include "cpu-reset.h"
@@ -72,11 +73,96 @@ static void *kexec_page_alloc(void *arg)
return page_address(page);
}
+/*
+ * Identity-map (idmap) every segment that needs to be relocated: the
+ * destination and source pages, as well as the indirection pages that
+ * hold the source and destination addresses.
+ * Ideally, we could linearly map the src and dst addresses, so the
+ * relocation routine would only need to do memcpy(dst, src, len). That
+ * is not possible, because on ARMv8.0, EL2 does not have TTBR1, and thus
+ * we might not have enough linear VA range. So, simply idmap everything
+ * here, which works at both EL1 and EL2. Note: we cannot do the
+ * relocation at EL1 and later upgrade to EL2, because the old world is
+ * erased by then, so there is nowhere left to trap to.
+ */
+static int map_segments(struct kimage *kimage, pgd_t *pgdp,
+ struct trans_table_info *info)
+{
+	unsigned long *ptr = NULL;
+ unsigned long dest = 0;
+ unsigned long entry, addr;
+ int rc;
+
+ for (entry = kimage->head; !(entry & IND_DONE); entry = *ptr++) {
+ addr = entry & PAGE_MASK;
+
+ switch (entry & IND_FLAGS) {
+ case IND_DESTINATION:
+ dest = addr;
+ break;
+ case IND_INDIRECTION:
+ ptr = __va(addr);
+ rc = trans_table_map_page(info, pgdp, ptr,
+ addr, PAGE_KERNEL);
+ if (rc)
+ return rc;
+ break;
+ case IND_SOURCE:
+ rc = trans_table_map_page(info, pgdp, __va(addr),
+ addr, PAGE_KERNEL);
+ if (rc)
+ return rc;
+ rc = trans_table_map_page(info, pgdp, __va(dest),
+ dest, PAGE_KERNEL);
+ if (rc)
+ return rc;
+ dest += PAGE_SIZE;
+ }
+ }
+ return 0;
+}
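
The loop above walks the same entry list that the relocation routine
consumes. To see why the idmap is sufficient, here is a hedged, standalone
C rendering of the copy step that arm64_relocate_new_kernel performs in
assembly: with va == pa for every page touched, the walk needs no address
translation beyond a cast. The IND_* values follow include/linux/kexec.h;
the buffers in main() are synthetic stand-ins for physical pages.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define EX_PAGE_SIZE	4096UL
	#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
	#define IND_DESTINATION	(1UL << 0)
	#define IND_INDIRECTION	(1UL << 1)
	#define IND_DONE	(1UL << 2)
	#define IND_SOURCE	(1UL << 3)
	#define IND_FLAGS \
		(IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

	/* Same walk as map_segments(), but copying instead of mapping */
	static void relocate(unsigned long head)
	{
		unsigned long *ptr = NULL;
		unsigned long dest = 0;
		unsigned long entry;

		for (entry = head; !(entry & IND_DONE); entry = *ptr++) {
			unsigned long addr = entry & EX_PAGE_MASK;

			switch (entry & IND_FLAGS) {
			case IND_DESTINATION:
				dest = addr;
				break;
			case IND_INDIRECTION:
				ptr = (unsigned long *)addr; /* va == pa */
				break;
			case IND_SOURCE:
				memcpy((void *)dest, (void *)addr, EX_PAGE_SIZE);
				dest += EX_PAGE_SIZE;
				break;
			}
		}
	}

	int main(void)
	{
		unsigned long *ind = aligned_alloc(EX_PAGE_SIZE, EX_PAGE_SIZE);
		char *src = aligned_alloc(EX_PAGE_SIZE, EX_PAGE_SIZE);
		char *dst = aligned_alloc(EX_PAGE_SIZE, EX_PAGE_SIZE);

		strcpy(src, "relocated page");
		ind[0] = (unsigned long)dst | IND_DESTINATION;
		ind[1] = (unsigned long)src | IND_SOURCE;
		ind[2] = IND_DONE;

		/* head, like kimage->head, starts with an indirection entry */
		relocate((unsigned long)ind | IND_INDIRECTION);
		printf("%s\n", dst);	/* prints "relocated page" */
		return 0;
	}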
+
+static int mmu_relocate_setup(struct kimage *kimage, unsigned long kern_reloc,
+ struct kern_reloc_arg *kern_reloc_arg)
+{
+ struct trans_table_info info = {
+ .trans_alloc_page = kexec_page_alloc,
+ .trans_alloc_arg = kimage,
+ .trans_flags = 0,
+ };
+ pgd_t *trans_table;
+ int rc;
+
+ rc = trans_table_create_empty(&info, &trans_table);
+ if (rc)
+ return rc;
+
+ rc = map_segments(kimage, trans_table, &info);
+ if (rc)
+ return rc;
+
+ /* Map relocation function va == pa */
+ rc = trans_table_map_page(&info, trans_table, __va(kern_reloc),
+ kern_reloc, PAGE_KERNEL_EXEC);
+ if (rc)
+ return rc;
+
+ /* Map relocation function argument va == pa */
+ rc = trans_table_map_page(&info, trans_table, kern_reloc_arg,
+ __pa(kern_reloc_arg), PAGE_KERNEL);
+ if (rc)
+ return rc;
+
+ kern_reloc_arg->trans_table = __pa(trans_table);
+
+ return 0;
+}
int machine_kexec_post_load(struct kimage *kimage)
{
unsigned long kern_reloc;
struct kern_reloc_arg *kern_reloc_arg;
+ int rc = 0;
kern_reloc = page_to_phys(kimage->control_code_page);
memcpy(__va(kern_reloc), arm64_relocate_new_kernel,
@@ -94,8 +180,16 @@ int machine_kexec_post_load(struct kimage *kimage)
kern_reloc_arg->entry_addr = kimage->start;
kern_reloc_arg->kern_arg0 = kimage->arch.dtb_mem;
+	/*
+	 * If no relocation is needed, the relocation routine does not
+	 * have to enable the MMU, so do not create page tables in that
+	 * case (e.g. for a crash kernel, whose segments load in place).
+	 */
+ if (!(kimage->head & IND_DONE))
+ rc = mmu_relocate_setup(kimage, kern_reloc, kern_reloc_arg);
+
kexec_image_info(kimage);
- return 0;
+ return rc;
}
/**
Configure a page table located in kexec-safe memory that has the
following mappings:

1. mapping for the text of the relocation function, with executable
   permission.
2. mapping for the argument of the relocation function.
3. mappings for all source ranges.
4. mappings for all destination ranges.
5. mappings for the array that contains information about the sources
   and destinations.

We could make this page table contain linear addresses, but instead we
create identity maps (va == pa) for every mapping. This is because the
relocation code can be executed at EL2, where TTBR1 might not be
available. There is also no way to execute the relocation code at EL1
and later upgrade to EL2, because the old world is overwritten by then,
leaving nowhere to trap to in order to escalate.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/kexec.h    |  3 +
 arch/arm64/kernel/asm-offsets.c   |  1 +
 arch/arm64/kernel/machine_kexec.c | 96 ++++++++++++++++++++++++++++++-
 3 files changed, 99 insertions(+), 1 deletion(-)
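
As a back-of-the-envelope illustration of the TTBR1 point above (an
aside, not part of the patch): on arm64 the kernel's linear map lives in
the upper half of the address space, which is translated through TTBR1,
and ARMv8.0 EL2 has no TTBR1; identity addresses fall in the lower half
that TTBR0 covers. Assuming 48-bit VAs for the sake of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long ttbr0_end = 1UL << 48;		 /* exclusive upper bound */
		unsigned long linear_va = 0xffff000012345000UL;	 /* typical TTBR1-half VA */
		unsigned long idmap_va  = 0x0000000080000000UL;	 /* pa reused as va */

		printf("linear-map VA reachable via TTBR0: %s\n",
		       linear_va < ttbr0_end ? "yes" : "no");	/* no */
		printf("identity VA reachable via TTBR0:   %s\n",
		       idmap_va < ttbr0_end ? "yes" : "no");	/* yes */
		return 0;
	}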