@@ -25,6 +25,7 @@ enum {
GZVM_FUNC_MEMREGION_PURPOSE = 15,
GZVM_FUNC_SET_DTB_CONFIG = 16,
GZVM_FUNC_MAP_GUEST = 17,
+ GZVM_FUNC_MAP_GUEST_BLOCK = 18,
NR_GZVM_FUNC,
};
@@ -50,6 +51,7 @@ enum {
#define MT_HVC_GZVM_MEMREGION_PURPOSE GZVM_HCALL_ID(GZVM_FUNC_MEMREGION_PURPOSE)
#define MT_HVC_GZVM_SET_DTB_CONFIG GZVM_HCALL_ID(GZVM_FUNC_SET_DTB_CONFIG)
#define MT_HVC_GZVM_MAP_GUEST GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST)
+#define MT_HVC_GZVM_MAP_GUEST_BLOCK GZVM_HCALL_ID(GZVM_FUNC_MAP_GUEST_BLOCK)
#define GIC_V3_NR_LRS 16
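The new MT_HVC_GZVM_MAP_GUEST_BLOCK ID reuses the driver's existing GZVM_HCALL_ID() helper, so adding a hypercall only needs the enum entry and the matching #define above. As a hedged sketch (the helper's definition is not part of this diff, and the exact SMCCC type/convention/owner bits are assumptions, not taken from the patch), a vendor hypercall ID of this kind is typically composed as follows:

	#include <linux/arm-smccc.h>

	/* Assumed layout: fast SMC64 call in the vendor-hypervisor service range. */
	#define EXAMPLE_GZVM_HCALL_ID(func)					\
		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,	\
				   ARM_SMCCC_OWNER_VENDOR_HYP, (func))
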
@@ -347,10 +347,11 @@ static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
fallthrough;
case GZVM_CAP_PVM_SET_PROTECTED_VM:
/*
- * To improve performance for protected VM, we have to populate VM's memory
- * before VM booting
+	 * If the hypervisor doesn't support block-based demand paging, populate
+	 * the VM's memory in advance to improve protected VM performance.
*/
- populate_all_mem_regions(gzvm);
+ if (gzvm->demand_page_gran == PAGE_SIZE)
+ populate_all_mem_regions(gzvm);
ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
return ret;
case GZVM_CAP_PVM_GET_PVMFW_SIZE:
@@ -374,7 +375,10 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
case GZVM_CAP_PROTECTED_VM:
ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
return ret;
+
case GZVM_CAP_ENABLE_DEMAND_PAGING:
+ fallthrough;
+ case GZVM_CAP_BLOCK_BASED_DEMAND_PAGING:
ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
return ret;
default:
@@ -417,3 +421,11 @@ int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
pfn, gfn, nr_pages, 0, 0, &res);
}
+
+int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages)
+{
+ struct arm_smccc_res res;
+
+ return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST_BLOCK, vm_id,
+ memslot_id, gfn, nr_pages, 0, 0, 0, &res);
+}
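gzvm_arch_map_guest_block() mirrors gzvm_arch_map_guest() above but drops the pfn argument: for a block mapping, the per-page pfns are handed over through the shared demand-page buffer (see handle_block_demand_page() below), so only the gfn and page count travel in hypercall registers. The driver's gzvm_hypcall_wrapper() is not shown in this diff; a minimal sketch of what such a wrapper could look like, assuming it simply forwards eight arguments to an HVC and folds a non-zero a0 into an errno (the real error translation may differ), is:

	#include <linux/arm-smccc.h>
	#include <linux/errno.h>

	/* Hypothetical stand-in for gzvm_hypcall_wrapper(), for illustration only. */
	static int example_hypcall_wrapper(unsigned long a0, unsigned long a1,
					   unsigned long a2, unsigned long a3,
					   unsigned long a4, unsigned long a5,
					   unsigned long a6, unsigned long a7,
					   struct arm_smccc_res *res)
	{
		arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
		/* Assumed error mapping; the real wrapper may translate differently. */
		return res->a0 ? -EINVAL : 0;
	}
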
@@ -115,6 +115,50 @@ int gzvm_vm_allocate_guest_page(struct gzvm_memslot *slot, u64 gfn, u64 *pfn)
return 0;
}
+static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
+{
+ u64 pfn, __gfn;
+ int ret, i;
+
+ u32 nr_entries = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE;
+ struct gzvm_memslot *memslot = &vm->memslot[memslot_id];
+ u64 start_gfn = ALIGN_DOWN(gfn, nr_entries);
+ u32 total_pages = memslot->npages;
+ u64 base_gfn = memslot->base_gfn;
+
+ /*
+	 * If the start or end gfn of this demand paging block falls outside the
+	 * memslot's memory region, clamp start_gfn/nr_entries accordingly.
+ */
+ if (start_gfn < base_gfn)
+ start_gfn = base_gfn;
+
+ if (start_gfn + nr_entries > base_gfn + total_pages)
+ nr_entries = base_gfn + total_pages - start_gfn;
+
+ mutex_lock(&vm->demand_paging_lock);
+ for (i = 0, __gfn = start_gfn; i < nr_entries; i++, __gfn++) {
+ ret = gzvm_vm_allocate_guest_page(memslot, __gfn, &pfn);
+ if (unlikely(ret)) {
+			ret = -EFAULT;
+ goto err_unlock;
+ }
+ vm->demand_page_buffer[i] = pfn;
+ }
+
+ ret = gzvm_arch_map_guest_block(vm->vm_id, memslot_id, start_gfn,
+ nr_entries);
+ if (unlikely(ret)) {
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+err_unlock:
+ mutex_unlock(&vm->demand_paging_lock);
+
+ return ret;
+}
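A worked example of the clamping above, assuming 4 KiB pages (so a 2 MiB block covers 512 gfns) and a made-up memslot: a fault near the end of the slot gets its block trimmed so that no gfn outside the memslot is populated. The snippet is a standalone illustration with hypothetical numbers, not driver code:

	#include <assert.h>
	#include <stdint.h>

	/* Local copy of the helper (power-of-two alignment, like the kernel path). */
	#define EX_ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		/* Hypothetical memslot: 1000 pages starting at gfn 0x80200. */
		uint64_t base_gfn = 0x80200, total_pages = 1000;
		uint64_t gfn = base_gfn + 900;	/* fault near the end of the slot */

		uint64_t nr_entries = (2 * 1024 * 1024) / 4096;	/* 512 */
		uint64_t start_gfn = EX_ALIGN_DOWN(gfn, nr_entries);

		if (start_gfn < base_gfn)
			start_gfn = base_gfn;
		if (start_gfn + nr_entries > base_gfn + total_pages)
			nr_entries = base_gfn + total_pages - start_gfn;

		/* gfn 0x80584 falls in the block at 0x80400; only 488 pages remain. */
		assert(start_gfn == 0x80400);
		assert(nr_entries == 488);
		return 0;
	}
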
+
static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
{
int ret;
@@ -154,5 +198,8 @@ int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu)
if (unlikely(vm->mem_alloc_mode == GZVM_FULLY_POPULATED))
return -EFAULT;
- return handle_single_demand_page(vm, memslot_id, gfn);
+ if (vm->demand_page_gran == PAGE_SIZE)
+ return handle_single_demand_page(vm, memslot_id, gfn);
+ else
+ return handle_block_demand_page(vm, memslot_id, gfn);
}
@@ -301,6 +301,8 @@ static long gzvm_vm_ioctl(struct file *filp, unsigned int ioctl,
static void gzvm_destroy_vm(struct gzvm *gzvm)
{
+ size_t allocated_size;
+
pr_debug("VM-%u is going to be destroyed\n", gzvm->vm_id);
mutex_lock(&gzvm->lock);
@@ -313,6 +315,11 @@ static void gzvm_destroy_vm(struct gzvm *gzvm)
list_del(&gzvm->vm_list);
mutex_unlock(&gzvm_list_lock);
+ if (gzvm->demand_page_buffer) {
+ allocated_size = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE * sizeof(u64);
+ free_pages_exact(gzvm->demand_page_buffer, allocated_size);
+ }
+
mutex_unlock(&gzvm->lock);
kfree(gzvm);
@@ -332,6 +339,46 @@ static const struct file_operations gzvm_vm_fops = {
.llseek = noop_llseek,
};
+/**
+ * setup_vm_demand_paging - Query and set up the hypervisor's demand page size
+ * @vm: gzvm instance to set up demand paging for
+ *
+ * Return: void
+ */
+static void setup_vm_demand_paging(struct gzvm *vm)
+{
+ u32 buf_size = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE * sizeof(u64);
+ struct gzvm_enable_cap cap = {0};
+ void *buffer;
+ int ret;
+
+ mutex_init(&vm->demand_paging_lock);
+ buffer = alloc_pages_exact(buf_size, GFP_KERNEL);
+ if (!buffer) {
+		/* Fall back to the default page size for demand paging */
+ vm->demand_page_gran = PAGE_SIZE;
+ vm->demand_page_buffer = NULL;
+ return;
+ }
+
+ cap.cap = GZVM_CAP_BLOCK_BASED_DEMAND_PAGING;
+ cap.args[0] = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE;
+ cap.args[1] = (__u64)virt_to_phys(buffer);
+	/* demand_page_buffer is freed when the VM is destroyed */
+ vm->demand_page_buffer = buffer;
+
+ ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
+ if (ret == 0) {
+ vm->demand_page_gran = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE;
+		/* freed in gzvm_destroy_vm() */
+ vm->demand_page_buffer = buffer;
+ } else {
+ vm->demand_page_gran = PAGE_SIZE;
+ vm->demand_page_buffer = NULL;
+ free_pages_exact(buffer, buf_size);
+ }
+}
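One sizing note on buf_size above, assuming 4 KiB base pages: the mailbox needs one u64 pfn slot per page of a 2 MiB block, i.e. 512 entries or exactly 4096 bytes, which is why alloc_pages_exact() ends up returning a single page. A compile-time restatement of that arithmetic (illustration only, written with literal values rather than PAGE_SIZE so it does not depend on the kernel's page-size configuration):

	#include <linux/build_bug.h>

	/* 2 MiB block / 4 KiB page = 512 pfn entries * 8 bytes = 4096 bytes. */
	static_assert((2 * 1024 * 1024) / 4096 * 8 == 4096);
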
+
static int setup_mem_alloc_mode(struct gzvm *vm)
{
int ret;
@@ -340,10 +387,12 @@ static int setup_mem_alloc_mode(struct gzvm *vm)
cap.cap = GZVM_CAP_ENABLE_DEMAND_PAGING;
ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
- if (!ret)
+ if (!ret) {
vm->mem_alloc_mode = GZVM_DEMAND_PAGING;
- else
+ setup_vm_demand_paging(vm);
+ } else {
vm->mem_alloc_mode = GZVM_FULLY_POPULATED;
+ }
return 0;
}
@@ -44,6 +44,8 @@
#define GZVM_VCPU_RUN_MAP_SIZE (PAGE_SIZE * 2)
+#define GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE (2 * 1024 * 1024) /* 2MB */
+
enum gzvm_demand_paging_mode {
GZVM_FULLY_POPULATED = 0,
GZVM_DEMAND_PAGING = 1,
@@ -112,6 +114,11 @@ struct gzvm_vcpu {
* @irq_srcu: structure data for SRCU(sleepable rcu)
* @irq_lock: lock for irq injection
* @mem_alloc_mode: memory allocation mode - fully allocated or demand paging
+ * @demand_page_gran: demand paging granularity: how much memory is allocated
+ *                    for the VM on a single page fault
+ * @demand_page_buffer: the mailbox for passing a block's pfns to the hypervisor
+ * @demand_paging_lock: lock to prevent multiple CPUs from using the same
+ *                      demand page mailbox at the same time
*/
struct gzvm {
struct gzvm_vcpu *vcpus[GZVM_MAX_VCPUS];
@@ -135,6 +142,10 @@ struct gzvm {
struct srcu_struct irq_srcu;
struct mutex irq_lock;
u32 mem_alloc_mode;
+
+ u32 demand_page_gran;
+ u64 *demand_page_buffer;
+ struct mutex demand_paging_lock;
};
long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args);
@@ -155,6 +166,7 @@ int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id);
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
u64 nr_pages);
+int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
struct gzvm_enable_cap *cap,
void __user *argp);
@@ -18,6 +18,8 @@
#define GZVM_CAP_VM_GPA_SIZE 0xa5
#define GZVM_CAP_PROTECTED_VM 0xffbadab1
+/* query whether the hypervisor supports block-based demand paging */
+#define GZVM_CAP_BLOCK_BASED_DEMAND_PAGING 0x9201
#define GZVM_CAP_ENABLE_DEMAND_PAGING 0x9202
/* sub-commands put in args[0] for GZVM_CAP_PROTECTED_VM */