@@ -13,6 +13,8 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
+/* Must not overlap the dirty ring pages that start at KVM_DIRTY_LOG_PAGE_OFFSET. */
+#define KVM_DIRTY_QUOTA_PAGE_OFFSET 3
#define DE_VECTOR 0
#define DB_VECTOR 1
@@ -11,7 +11,8 @@ KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
- $(KVM)/dirty_ring.o $(KVM)/binary_stats.o
+ $(KVM)/dirty_ring.o $(KVM)/binary_stats.o \
+ $(KVM)/dirty_quota_migration.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
@@ -14,4 +14,20 @@ struct vCPUDirtyQuotaContext {
u64 dirty_quota;
};
+#if (KVM_DIRTY_QUOTA_PAGE_OFFSET == 0)
+/*
+ * An arch that does not define KVM_DIRTY_QUOTA_PAGE_OFFSET does not build
+ * dirty_quota_migration.o; provide a no-op stub so common code compiles.
+ */
+static inline int kvm_vcpu_dirty_quota_alloc(struct vCPUDirtyQuotaContext **vCPUdqctx)
+{
+ return 0;
+}
+
+#else /* KVM_DIRTY_QUOTA_PAGE_OFFSET == 0 */
+
+int kvm_vcpu_dirty_quota_alloc(struct vCPUDirtyQuotaContext **vCPUdqctx);
+
+#endif /* KVM_DIRTY_QUOTA_PAGE_OFFSET == 0 */
+
#endif /* DIRTY_QUOTA_MIGRATION_H */
@@ -1905,6 +1905,15 @@ struct kvm_hyperv_eventfd {
#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+/*
+ * KVM_DIRTY_QUOTA_PAGE_OFFSET is defined by the arch implementing
+ * dirty quota migration and is set to the starting page offset of
+ * the dirty quota context structure.
+ */
+#ifndef KVM_DIRTY_QUOTA_PAGE_OFFSET
+#define KVM_DIRTY_QUOTA_PAGE_OFFSET 0
+#endif
+
/*
* Arch needs to define the macro after implementing the dirty ring
* feature. KVM_DIRTY_LOG_PAGE_OFFSET should be defined as the
new file mode 100644
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/dirty_quota_migration.h>
+
+int kvm_vcpu_dirty_quota_alloc(struct vCPUDirtyQuotaContext **vCPUdqctx)
+{
+	u64 size = sizeof(struct vCPUDirtyQuotaContext);
+
+	*vCPUdqctx = vzalloc(size);
+	if (!(*vCPUdqctx))
+		return -ENOMEM;
+ return 0;
+}
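Since the context is vmalloc()ed per vCPU, the teardown path needs a matching free. A minimal sketch of such a helper, assuming the name kvm_vcpu_dirty_quota_free(); the free path itself is not part of this excerpt:

void kvm_vcpu_dirty_quota_free(struct vCPUDirtyQuotaContext **vCPUdqctx)
{
	/* vfree(NULL) is a no-op, so this is safe even if allocation never ran. */
	vfree(*vCPUdqctx);
	*vCPUdqctx = NULL;
}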
@@ -66,6 +66,7 @@
#include <trace/events/kvm.h>
#include <linux/kvm_dirty_ring.h>
+#include <linux/dirty_quota_migration.h>
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
@@ -1079,6 +1080,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
}
kvm->max_halt_poll_ns = halt_poll_ns;
+ kvm->dirty_quota_migration_enabled = false;
r = kvm_arch_init_vm(kvm, type);
if (r)
@@ -3638,6 +3640,15 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto arch_vcpu_destroy;
}
+	if (KVM_DIRTY_QUOTA_PAGE_OFFSET) {
+		r = kvm_vcpu_dirty_quota_alloc(&vcpu->vCPUdqctx);
+		if (r) {
+			/* arch_vcpu_destroy does not free the dirty ring allocated above. */
+			kvm_dirty_ring_free(&vcpu->dirty_ring);
+			goto arch_vcpu_destroy;
+		}
+	}
+
mutex_lock(&kvm->lock);
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
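For context, userspace would presumably reach the per-vCPU dirty quota context by mmap()ing the vCPU file descriptor at KVM_DIRTY_QUOTA_PAGE_OFFSET pages, the same way the dirty ring is reached at KVM_DIRTY_LOG_PAGE_OFFSET; that mmap plumbing is not part of this excerpt. A hedged userspace sketch, with map_dirty_quota() as an illustrative helper name:

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Map the one-page dirty quota context from a vCPU fd; returns NULL on failure. */
static void *map_dirty_quota(int vcpu_fd)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *ctx = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
			 vcpu_fd, (off_t)KVM_DIRTY_QUOTA_PAGE_OFFSET * psz);

	return ctx == MAP_FAILED ? NULL : ctx;
}

Through such a mapping the VMM would then program the dirty_quota field of struct vCPUDirtyQuotaContext shown above.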