@@ -920,6 +920,8 @@ struct kvm_arch {
bool guest_can_read_msr_platform_info;
bool exception_payload_enabled;
+
+ u8 dirty_logging_mode;
};
struct kvm_vm_stat {
@@ -1199,6 +1201,8 @@ struct kvm_arch_async_pf {
extern struct kvm_x86_ops *kvm_x86_ops;
+extern u8 kvm_default_dirty_log_mode;
+
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
@@ -420,4 +420,8 @@ struct kvm_nested_state {
__u8 data[0];
};
+#define KVM_DIRTY_LOG_MODE_WRPROT 1
+#define KVM_DIRTY_LOG_MODE_DBIT 2
+#define KVM_DIRTY_LOG_MODE_PML 4
+
#endif /* _ASM_X86_KVM_H */
@@ -262,6 +262,8 @@ static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
*/
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+u8 __read_mostly kvm_default_dirty_log_mode;
+EXPORT_SYMBOL_GPL(kvm_default_dirty_log_mode);
static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
@@ -432,8 +434,12 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
shadow_acc_track_mask = acc_track_mask;
shadow_me_mask = me_mask;
- if (shadow_dirty_mask == 0)
+ if (shadow_dirty_mask == 0) {
enable_d_bit_logging = false;
+
+ if (kvm_default_dirty_log_mode == KVM_DIRTY_LOG_MODE_DBIT)
+ kvm_default_dirty_log_mode = KVM_DIRTY_LOG_MODE_WRPROT;
+ }
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
@@ -719,7 +725,7 @@ static void spte_dirty_mask_cleared(struct kvm *kvm, u64 *sptep)
* the D bit would result in those pages being unnecessarily reported as
* dirty again in the next round.
*/
- if (enable_d_bit_logging) {
+ if (kvm->arch.dirty_logging_mode == KVM_DIRTY_LOG_MODE_DBIT) {
gfn_t gfn;
struct kvm_mmu_page *sp = page_header(__pa(sptep));
@@ -1722,14 +1728,19 @@ void kvm_arch_mmu_get_and_reset_log_dirty(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long *mask)
{
- if (kvm_x86_ops->get_and_reset_log_dirty)
- kvm_x86_ops->get_and_reset_log_dirty(kvm, slot, gfn_offset,
- mask);
- else if (enable_d_bit_logging)
+ switch (kvm->arch.dirty_logging_mode) {
+ case KVM_DIRTY_LOG_MODE_WRPROT:
+ kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, *mask);
+ break;
+ case KVM_DIRTY_LOG_MODE_DBIT:
*mask |= kvm_mmu_shadow_dirty_mask_test_and_clear(kvm, slot,
gfn_offset);
- else
- kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, *mask);
+ break;
+ default:
+ if (kvm_x86_ops->get_and_reset_log_dirty)
+ kvm_x86_ops->get_and_reset_log_dirty(kvm, slot,
+ gfn_offset, mask);
+ }
}
/**
@@ -6057,6 +6068,9 @@ int kvm_mmu_module_init(void)
BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
kvm_mmu_reset_all_pte_masks();
+ kvm_default_dirty_log_mode = enable_d_bit_logging
+ ? KVM_DIRTY_LOG_MODE_DBIT
+ : KVM_DIRTY_LOG_MODE_WRPROT;
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
@@ -7992,7 +7992,8 @@ static __init int hardware_setup(void)
kvm_x86_ops->slot_disable_log_dirty = NULL;
kvm_x86_ops->flush_log_dirty = NULL;
kvm_x86_ops->get_and_reset_log_dirty = NULL;
- }
+ } else
+ kvm_default_dirty_log_mode = KVM_DIRTY_LOG_MODE_PML;
if (!cpu_has_vmx_preemption_timer())
kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
@@ -4404,11 +4404,17 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
mutex_lock(&kvm->slots_lock);
- /*
- * Flush potentially hardware-cached dirty pages to dirty_bitmap.
- */
- if (kvm_x86_ops->flush_log_dirty)
- kvm_x86_ops->flush_log_dirty(kvm);
+ switch (kvm->arch.dirty_logging_mode) {
+ case KVM_DIRTY_LOG_MODE_WRPROT:
+ case KVM_DIRTY_LOG_MODE_DBIT:
+ break;
+ default:
+ /*
+ * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+ */
+ if (kvm_x86_ops->flush_log_dirty)
+ kvm_x86_ops->flush_log_dirty(kvm);
+ }
r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
@@ -9020,6 +9026,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.guest_can_read_msr_platform_info = true;
+ kvm->arch.dirty_logging_mode = kvm_default_dirty_log_mode;
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
@@ -9302,15 +9310,26 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* See the comments in fast_page_fault().
*/
if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- if (kvm_x86_ops->slot_enable_log_dirty)
- kvm_x86_ops->slot_enable_log_dirty(kvm, new);
- else if (enable_d_bit_logging)
- kvm_mmu_slot_wrprot_lpage_and_clear_dirty(kvm, new);
- else
+ switch (kvm->arch.dirty_logging_mode) {
+ case KVM_DIRTY_LOG_MODE_WRPROT:
kvm_mmu_slot_remove_write_access(kvm, new);
+ break;
+ case KVM_DIRTY_LOG_MODE_DBIT:
+ kvm_mmu_slot_wrprot_lpage_and_clear_dirty(kvm, new);
+ break;
+ default:
+ if (kvm_x86_ops->slot_enable_log_dirty)
+ kvm_x86_ops->slot_enable_log_dirty(kvm, new);
+ }
} else {
- if (kvm_x86_ops->slot_disable_log_dirty)
- kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+ switch (kvm->arch.dirty_logging_mode) {
+ case KVM_DIRTY_LOG_MODE_WRPROT:
+ case KVM_DIRTY_LOG_MODE_DBIT:
+ break;
+ default:
+ if (kvm_x86_ops->slot_disable_log_dirty)
+ kvm_x86_ops->slot_disable_log_dirty(kvm, new);
+ }
}
}
Add the ability to have different VMs use different dirty logging modes.

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/x86/include/asm/kvm_host.h |  4 +++
 arch/x86/include/uapi/asm/kvm.h |  4 +++
 arch/x86/kvm/mmu.c              | 30 +++++++++++++++++------
 arch/x86/kvm/vmx.c              |  3 ++-
 arch/x86/kvm/x86.c              | 43 ++++++++++++++++++++++++---------
 5 files changed, 63 insertions(+), 21 deletions(-)