@@ -6,5 +6,8 @@
#include "kvm/kvm.h"
static inline void kvm_arm_realm_create_realm_descriptor(struct kvm *kvm) {}
+static inline void kvm_arm_realm_populate_kernel(struct kvm *kvm) {}
+static inline void kvm_arm_realm_populate_initrd(struct kvm *kvm) {}
+static inline void kvm_arm_realm_populate_dtb(struct kvm *kvm) {}
#endif /* ! __ASM_REALM_H */
@@ -6,5 +6,8 @@
#include "kvm/kvm.h"
void kvm_arm_realm_create_realm_descriptor(struct kvm *kvm);
+void kvm_arm_realm_populate_kernel(struct kvm *kvm);
+void kvm_arm_realm_populate_initrd(struct kvm *kvm);
+void kvm_arm_realm_populate_dtb(struct kvm *kvm);
#endif /* ! __ASM_REALM_H */
@@ -1,5 +1,7 @@
#include "kvm/kvm.h"
+#include <linux/byteorder.h>
+#include <asm/image.h>
#include <asm/realm.h>
@@ -80,3 +82,113 @@ void kvm_arm_realm_create_realm_descriptor(struct kvm *kvm)
if (ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &rme_create_rd) < 0)
die_perror("KVM_CAP_RME(KVM_CAP_ARM_RME_CREATE_RD)");
}
+
+/* Tell KVM to initialise the Realm IPA range [start, start + size). */
+static void realm_init_ipa_range(struct kvm *kvm, u64 start, u64 size)
+{
+	struct kvm_cap_arm_rme_init_ipa_args init_ipa_args = {
+		.init_ipa_base = start,
+		.init_ipa_size = size
+	};
+	struct kvm_enable_cap rme_init_ipa_realm = {
+		.cap = KVM_CAP_ARM_RME,
+		.args[0] = KVM_CAP_ARM_RME_INIT_IPA_REALM,
+		.args[1] = (u64)&init_ipa_args
+	};
+
+	if (ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &rme_init_ipa_realm) < 0)
+		die("unable to initialise IPA range for Realm %llx - %llx (size %llu)",
+		    start, start + size, size);
+}
+
+static void __realm_populate(struct kvm *kvm, u64 start, u64 size)
+{
+	struct kvm_cap_arm_rme_populate_realm_args args = {
+		.populate_ipa_base = start,
+		.populate_ipa_size = size,
+	};
+	struct kvm_enable_cap populate_cap = {
+		.cap = KVM_CAP_ARM_RME,
+		.args[0] = KVM_CAP_ARM_RME_POPULATE_REALM,
+		.args[1] = (u64)&args,
+	};
+
+	if (ioctl(kvm->vm_fd, KVM_ENABLE_CAP, &populate_cap) < 0)
+		die("unable to populate Realm memory %llx - %llx (size %llu)",
+		    start, start + size, size);
+}
+
+static void realm_populate(struct kvm *kvm, u64 start, u64 size)
+{
+	realm_init_ipa_range(kvm, start, size);	/* declare the IPA range */
+	__realm_populate(kvm, start, size);	/* then populate it */
+}
+
+static bool is_arm64_linux_kernel_image(void *header)
+{
+	struct arm64_image_header *hdr = header;
+
+	return !memcmp(&hdr->magic, ARM64_IMAGE_MAGIC, sizeof(hdr->magic));
+}
+
+static ssize_t arm64_linux_kernel_image_size(void *header)
+{
+	struct arm64_image_header *hdr = header;
+
+	if (!is_arm64_linux_kernel_image(header))
+		die("Not arm64 Linux kernel Image");
+	return le64_to_cpu(hdr->image_size);
+}
+
+void kvm_arm_realm_populate_kernel(struct kvm *kvm)
+{
+	void *hdr = guest_flat_to_host(kvm, kvm->arch.kern_guest_start);
+	u64 start, end, ipa_size;
+
+	start = ALIGN_DOWN(kvm->arch.kern_guest_start, SZ_4K);
+	end = ALIGN(kvm->arch.kern_guest_start + kvm->arch.kern_size, SZ_4K);
+
+	/* For a Linux Image, use the memory size advertised in its header. */
+	ipa_size = is_arm64_linux_kernel_image(hdr) ?
+		   arm64_linux_kernel_image_size(hdr) : end - start;
+
+	/* Initialise the whole range, but populate only the loaded bytes. */
+	realm_init_ipa_range(kvm, start, ipa_size);
+	__realm_populate(kvm, start, end - start);
+}
+
+void kvm_arm_realm_populate_initrd(struct kvm *kvm)
+{
+	u64 kernel_end, start, end;
+
+	kernel_end = ALIGN(kvm->arch.kern_guest_start + kvm->arch.kern_size, SZ_4K);
+	end = ALIGN(kvm->arch.initrd_guest_start + kvm->arch.initrd_size, SZ_4K);
+	/*
+	 * The initrd is only 4-byte aligned, so its first page may be the
+	 * page that holds the end of the kernel and has therefore already
+	 * been populated; skip any such overlap.
+	 */
+	start = ALIGN_DOWN(kvm->arch.initrd_guest_start, SZ_4K);
+	if (start < kernel_end)
+		start = kernel_end;
+	if (end > start)
+		realm_populate(kvm, start, end - start);
+}
+
+void kvm_arm_realm_populate_dtb(struct kvm *kvm)
+{
+	u64 initrd_end, start, end;
+
+	initrd_end = ALIGN(kvm->arch.initrd_guest_start + kvm->arch.initrd_size, SZ_4K);
+	end = ALIGN(kvm->arch.dtb_guest_start + FDT_MAX_SIZE, SZ_4K);
+	/*
+	 * As with the initrd, the DTB may start on the page that holds the
+	 * tail of the (already populated) initrd. Skip the shared page so
+	 * it is not populated twice.
+	 */
+	start = ALIGN_DOWN(kvm->arch.dtb_guest_start, SZ_4K);
+	if (start < initrd_end)
+		start = initrd_end;
+	if (end > start)
+		realm_populate(kvm, start, end - start);
+}
@@ -7,6 +7,8 @@
#include "arm-common/gic.h"
#include "arm-common/pci.h"
+#include <asm/realm.h>
+
#include <stdbool.h>
#include <linux/byteorder.h>
@@ -231,6 +233,10 @@ static int setup_fdt(struct kvm *kvm)
if (kvm->cfg.arch.dump_dtb_filename)
dump_fdt(kvm->cfg.arch.dump_dtb_filename, fdt_dest);
+
+ if (kvm->cfg.arch.is_realm)
+ kvm_arm_realm_populate_dtb(kvm);
+
return 0;
}
late_init(setup_fdt);
@@ -6,6 +6,7 @@
#include "kvm/fdt.h"
#include "arm-common/gic.h"
+#include <asm/realm.h>
#include <sys/resource.h>
@@ -167,6 +168,9 @@ bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
pr_debug("Loaded kernel to 0x%llx (%llu bytes)",
kvm->arch.kern_guest_start, kvm->arch.kern_size);
+ if (kvm->cfg.arch.is_realm)
+ kvm_arm_realm_populate_kernel(kvm);
+
/*
* Now load backwards from the end of memory so the kernel
* decompressor has plenty of space to work with. First up is
@@ -188,7 +192,6 @@ bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
/* ... and finally the initrd, if we have one. */
if (fd_initrd != -1) {
struct stat sb;
- unsigned long initrd_start;
if (fstat(fd_initrd, &sb))
die_perror("fstat");
@@ -199,7 +202,6 @@ bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
if (pos < kernel_end)
die("initrd overlaps with kernel image.");
- initrd_start = guest_addr;
file_size = read_file(fd_initrd, pos, limit - pos);
if (file_size == -1) {
if (errno == ENOMEM)
@@ -208,11 +210,13 @@ bool kvm__arch_load_kernel_image(struct kvm *kvm, int fd_kernel, int fd_initrd,
die_perror("initrd read");
}
- kvm->arch.initrd_guest_start = initrd_start;
+ kvm->arch.initrd_guest_start = guest_addr;
kvm->arch.initrd_size = file_size;
pr_debug("Loaded initrd to 0x%llx (%llu bytes)",
- kvm->arch.initrd_guest_start,
- kvm->arch.initrd_size);
+ kvm->arch.initrd_guest_start, kvm->arch.initrd_size);
+
+ if (kvm->cfg.arch.is_realm)
+ kvm_arm_realm_populate_initrd(kvm);
} else {
kvm->arch.initrd_size = 0;
}
@@ -269,6 +273,8 @@ bool kvm__load_firmware(struct kvm *kvm, const char *firmware_filename)
/* Kernel isn't loaded by kvm, point start address to firmware */
kvm->arch.kern_guest_start = fw_addr;
+ kvm->arch.kern_size = fw_sz;
+
pr_debug("Loaded firmware to 0x%llx (%zd bytes)",
kvm->arch.kern_guest_start, fw_sz);
@@ -283,6 +289,10 @@ bool kvm__load_firmware(struct kvm *kvm, const char *firmware_filename)
kvm->arch.dtb_guest_start,
kvm->arch.dtb_guest_start + FDT_MAX_SIZE);
+ if (kvm->cfg.arch.is_realm)
+ /* We hijack the kernel fields to describe the firmware. */
+ kvm_arm_realm_populate_kernel(kvm);
+
return true;
}
@@ -9,6 +9,7 @@
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define ALIGN_DOWN(x,a)	__ALIGN_MASK((x) - (typeof(x))((a) - 1),(typeof(x))(a)-1)
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)