@@ -24,6 +24,7 @@ enum {
GZVM_FUNC_INFORM_EXIT = 14,
GZVM_FUNC_MEMREGION_PURPOSE = 15,
GZVM_FUNC_SET_DTB_CONFIG = 16,
+ GZVM_FUNC_MAP_GUEST = 17,
NR_GZVM_FUNC,
};
@@ -48,6 +49,7 @@ enum {
#define MT_HVC_GZVM_INFORM_EXIT GZVM_HCALL_ID(GZVM_FUNC_INFORM_EXIT)
#define MT_HVC_GZVM_MEMREGION_PURPOSE GZVM_HCALL_ID(GZVM_FUNC_MEMREGION_PURPOSE)
#define MT_HVC_GZVM_SET_DTB_CONFIG GZVM_HCALL_ID(GZVM_FUNC_SET_DTB_CONFIG)
+#define MT_HVC_GZVM_MAP_GUEST GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST)
#define GIC_V3_NR_LRS 16
@@ -378,15 +378,28 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp)
{
+ struct arm_smccc_res res = {0};
int ret;
switch (cap->cap) {
case GZVM_CAP_PROTECTED_VM:
ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
return ret;
+ case GZVM_CAP_ENABLE_DEMAND_PAGING:
+ ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
+ return ret;
default:
break;
}
return -EINVAL;
}
+
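+/**
+ * gzvm_arch_map_guest() - Map a range of host pages into the guest
+ * @vm_id: VM id returned from the hypervisor
+ * @memslot_id: Index of the memslot the pages belong to
+ * @pfn: Host page frame number of the first page to map
+ * @gfn: Guest frame number the pages are mapped at
+ * @nr_pages: Number of consecutive pages to map
+ *
+ * Return: 0 on success, negative errno on failure
+ */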
+int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
+ u64 nr_pages)
+{
+ struct arm_smccc_res res;
+
+ return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
+ pfn, gfn, nr_pages, 0, 0, &res);
+}
@@ -8,4 +8,5 @@ GZVM_DIR ?= ../../../drivers/virt/geniezone
gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
$(GZVM_DIR)/gzvm_vcpu.o $(GZVM_DIR)/gzvm_irqfd.o \
- $(GZVM_DIR)/gzvm_ioeventfd.o $(GZVM_DIR)/gzvm_mmu.o
+ $(GZVM_DIR)/gzvm_ioeventfd.o $(GZVM_DIR)/gzvm_mmu.o \
+ $(GZVM_DIR)/gzvm_exception.o
new file mode 100644
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/soc/mediatek/gzvm_drv.h>
+
+/**
+ * gzvm_handle_guest_exception() - Handle guest exception
+ * @vcpu: Pointer to the struct gzvm_vcpu that took the exception
+ *
+ * Return:
+ * * true - The exception has been handled; no need to return to VMM.
+ * * false - The exception has not been handled; userspace must take over.
+ */
+bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu)
+{
+ int ret;
+
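+ /* Bail out to userspace if the hypervisor left any reserved field non-zero */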
+ for (int i = 0; i < ARRAY_SIZE(vcpu->run->exception.reserved); i++) {
+ if (vcpu->run->exception.reserved[i])
+ return false;
+ }
+
+ switch (vcpu->run->exception.exception) {
+ case GZVM_EXCEPTION_PAGE_FAULT:
+ ret = gzvm_handle_page_fault(vcpu);
+ break;
+ case GZVM_EXCEPTION_UNKNOWN:
+ fallthrough;
+ default:
+ ret = -EFAULT;
+ }
+
+ return !ret;
+}
@@ -36,6 +36,8 @@ int gzvm_err_to_errno(unsigned long err)
return 0;
case ERR_NO_MEMORY:
return -ENOMEM;
+ case ERR_INVALID_ARGS:
+ return -EINVAL;
case ERR_NOT_SUPPORTED:
fallthrough;
case ERR_NOT_IMPLEMENTED:
@@ -101,3 +101,44 @@ int gzvm_vm_allocate_guest_page(struct gzvm *vm, struct gzvm_memslot *slot,
return 0;
}
+
+static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
+{
+ int ret;
+ u64 pfn;
+
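+ /* Allocate a host page to back the faulting gfn */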
+ ret = gzvm_vm_allocate_guest_page(vm, &vm->memslot[memslot_id], gfn, &pfn);
+ if (unlikely(ret))
+ return -EFAULT;
+
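+ /* Have the hypervisor map the new page into the guest at gfn */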
+ ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
+ if (unlikely(ret))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * gzvm_handle_page_fault() - Handle guest page fault, find corresponding page
+ *                            for the faulting GPA
+ * @vcpu: Pointer to the struct gzvm_vcpu of the faulting vcpu
+ *
+ * Return:
+ * * 0 - Successfully handled the guest page fault
+ * * -EFAULT - Failed to map the physical address to the guest's GPA
+ */
+int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
+{
+ struct gzvm *vm = vcpu->gzvm;
+ int memslot_id;
+ u64 gfn;
+
+ gfn = PHYS_PFN(vcpu->run->exception.fault_gpa);
+ memslot_id = gzvm_find_memslot(vm, gfn);
+ if (unlikely(memslot_id < 0))
+ return -EFAULT;
+
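+ /* Page faults should only be handled here while demand paging is in use */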
+ if (unlikely(vm->mem_alloc_mode == GZVM_FULLY_POPULATED))
+ return -EFAULT;
+
+ return handle_single_demand_page(vm, memslot_id, gfn);
+}
@@ -112,9 +112,11 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
* it's geniezone's responsibility to fill corresponding data
* structure
*/
- case GZVM_EXIT_HYPERCALL:
- fallthrough;
case GZVM_EXIT_EXCEPTION:
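+ /* Return to userspace only if the kernel could not handle the exception */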
+ if (!gzvm_handle_guest_exception(vcpu))
+ need_userspace = true;
+ break;
+ case GZVM_EXIT_HYPERCALL:
fallthrough;
case GZVM_EXIT_DEBUG:
fallthrough;
@@ -29,6 +29,31 @@ int gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn,
return 0;
}
+/**
+ * gzvm_find_memslot() - Find the memslot containing this @gfn
+ * @vm: Pointer to struct gzvm
+ * @gfn: Guest frame number
+ *
+ * Return:
+ * * >=0 - Index of memslot
+ * * -EFAULT - Not found
+ */
+int gzvm_find_memslot(struct gzvm *vm, u64 gfn)
+{
+ int i;
+
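+ /* Linear scan over the fixed-size memslot array; empty slots are skipped */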
+ for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
+ if (vm->memslot[i].npages == 0)
+ continue;
+
+ if (gfn >= vm->memslot[i].base_gfn &&
+ gfn < vm->memslot[i].base_gfn + vm->memslot[i].npages)
+ return i;
+ }
+
+ return -EFAULT;
+}
+
/**
* register_memslot_addr_range() - Register memory region to GenieZone
* @gzvm: Pointer to struct gzvm
@@ -60,7 +85,10 @@ register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
}
free_pages_exact(region, buf_size);
- return 0;
+
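+ /*
+ * Under demand paging, pages are allocated and mapped when the guest
+ * first touches them; otherwise populate the whole region up front.
+ */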
+ if (gzvm->mem_alloc_mode == GZVM_DEMAND_PAGING)
+ return 0;
+ return gzvm_vm_populate_mem_region(gzvm, memslot->slot_id);
}
/**
@@ -349,6 +377,22 @@ static const struct file_operations gzvm_vm_fops = {
.unlocked_ioctl = gzvm_vm_ioctl,
};
+static int setup_mem_alloc_mode(struct gzvm *vm)
+{
+ struct gzvm_enable_cap cap = {0};
+ int ret;
+
+ cap.cap = GZVM_CAP_ENABLE_DEMAND_PAGING;
+
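+ /*
+ * Probe the hypervisor: if enabling demand paging fails, fall back
+ * to fully populated mode.
+ */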
+ ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
+ if (!ret)
+ vm->mem_alloc_mode = GZVM_DEMAND_PAGING;
+ else
+ vm->mem_alloc_mode = GZVM_FULLY_POPULATED;
+
+ return 0;
+}
+
static struct gzvm *gzvm_create_vm(struct gzvm_driver *drv, unsigned long vm_type)
{
int ret;
@@ -385,6 +429,8 @@ static struct gzvm *gzvm_create_vm(struct gzvm_driver *drv, unsigned long vm_typ
return ERR_PTR(ret);
}
+ setup_mem_alloc_mode(gzvm);
+
mutex_lock(&gzvm_list_lock);
list_add(&gzvm->vm_list, &gzvm_list);
mutex_unlock(&gzvm_list_lock);
@@ -45,6 +45,7 @@ struct gzvm_driver {
*/
#define NO_ERROR (0)
#define ERR_NO_MEMORY (-5)
+#define ERR_INVALID_ARGS (-8)
#define ERR_NOT_SUPPORTED (-24)
#define ERR_NOT_IMPLEMENTED (-27)
#define ERR_FAULT (-40)
@@ -59,6 +60,11 @@ struct gzvm_driver {
#define GZVM_VCPU_RUN_MAP_SIZE (PAGE_SIZE * 2)
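+/*
+ * How guest memory is provisioned: populated entirely at memslot
+ * registration, or mapped on demand at first access (page fault).
+ */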
+enum gzvm_demand_paging_mode {
+ GZVM_FULLY_POPULATED = 0,
+ GZVM_DEMAND_PAGING = 1,
+};
+
/**
* struct mem_region_addr_range: identical to ffa memory constituent
* @address: the base IPA of the constituent memory region, aligned to 4 kiB
@@ -137,6 +143,7 @@ struct gzvm_pinned_page {
* @irq_lock: lock for irq injection
* @pinned_pages: use rb-tree to record pin/unpin page
* @mem_lock: lock for memory operations
+ * @mem_alloc_mode: memory allocation mode - fully populated or demand paging
*/
struct gzvm {
struct gzvm_driver *gzvm_drv;
@@ -161,6 +168,7 @@ struct gzvm {
struct hlist_head irq_ack_notifier_list;
struct srcu_struct irq_srcu;
struct mutex irq_lock;
+ u32 mem_alloc_mode;
struct rb_root pinned_pages;
struct mutex mem_lock;
@@ -183,6 +191,8 @@ int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp);
int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id);
+int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
+ u64 nr_pages);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp);
@@ -201,6 +211,10 @@ int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
int gzvm_arch_inform_exit(u16 vm_id);
+int gzvm_find_memslot(struct gzvm *vm, u64 gfn);
+int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
+bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
+
int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
u32 irq, bool level);
@@ -18,6 +18,7 @@
#define GZVM_CAP_VM_GPA_SIZE 0xa5
#define GZVM_CAP_PROTECTED_VM 0xffbadab1
+#define GZVM_CAP_ENABLE_DEMAND_PAGING 0x9202
/* sub-commands put in args[0] for GZVM_CAP_PROTECTED_VM */
#define GZVM_CAP_PVM_SET_PVMFW_GPA 0
@@ -186,6 +187,12 @@ enum {
GZVM_EXIT_GZ = 0x9292000a,
};
+/* exception definitions of GZVM_EXIT_EXCEPTION */
+enum {
+ GZVM_EXCEPTION_UNKNOWN = 0x0,
+ GZVM_EXCEPTION_PAGE_FAULT = 0x1,
+};
+
/**
* struct gzvm_vcpu_run: Same purpose as kvm_run, this struct is
* shared between userspace, kernel and
@@ -250,6 +257,12 @@ struct gzvm_vcpu_run {
__u32 exception;
/* Exception error codes */
__u32 error_code;
+ /* Fault GPA (guest physical address, i.e. IPA on ARM) */
+ __u64 fault_gpa;
+ /*
+ * Future-proof reservation, reset to zero by the hypervisor.
+ * Pads the exception struct up to the union size of 256 bytes.
+ */
+ __u64 reserved[30];
} exception;
/* GZVM_EXIT_HYPERCALL */
struct {