@@ -377,23 +377,13 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- struct kvm_rmap_head *rmap_head;
-
if (tdp_mmu_enabled)
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);
- if (!kvm_memslots_have_rmaps(kvm))
- return;
-
- while (mask) {
- rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
- PG_LEVEL_4K, slot);
- rmap_write_protect(rmap_head, false);
-
- /* clear the first set bit */
- mask &= mask - 1;
- }
+ if (kvm_memslots_have_rmaps(kvm))
+ kvm_shadow_mmu_write_protect_pt_masked(kvm, slot, gfn_offset,
+ mask);
}
/**
@@ -410,23 +400,13 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
- struct kvm_rmap_head *rmap_head;
-
if (tdp_mmu_enabled)
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);
- if (!kvm_memslots_have_rmaps(kvm))
- return;
-
- while (mask) {
- rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
- PG_LEVEL_4K, slot);
- __rmap_clear_dirty(kvm, rmap_head, slot);
-
- /* clear the first set bit */
- mask &= mask - 1;
- }
+ if (kvm_memslots_have_rmaps(kvm))
+ kvm_shadow_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset,
+ mask);
}
/**
@@ -484,16 +464,11 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn,
int min_level)
{
- struct kvm_rmap_head *rmap_head;
- int i;
bool write_protected = false;
- if (kvm_memslots_have_rmaps(kvm)) {
- for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
- rmap_head = gfn_to_rmap(gfn, i, slot);
- write_protected |= rmap_write_protect(rmap_head, true);
- }
- }
+ if (kvm_memslots_have_rmaps(kvm))
+ write_protected |=
+ kvm_shadow_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
if (tdp_mmu_enabled)
write_protected |=
kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
@@ -2915,8 +2890,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
{
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
- walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
- start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+ kvm_shadow_mmu_wrprot_slot(kvm, memslot, start_level);
write_unlock(&kvm->mmu_lock);
}
@@ -3067,11 +3041,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
{
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
- /*
- * Clear dirty bits only on 4k SPTEs since the legacy MMU only
- * support dirty logging at a 4k granularity.
- */
- walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
+ kvm_shadow_mmu_clear_dirty_slot(kvm, memslot);
write_unlock(&kvm->mmu_lock);
}
diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -3453,3 +3453,67 @@ unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free)
return freed;
}
+
+void kvm_shadow_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ struct kvm_rmap_head *rmap_head;
+
+ while (mask) {
+ rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+ PG_LEVEL_4K, slot);
+ rmap_write_protect(rmap_head, false);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
+
+void kvm_shadow_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ struct kvm_rmap_head *rmap_head;
+
+ while (mask) {
+ rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+ PG_LEVEL_4K, slot);
+ __rmap_clear_dirty(kvm, rmap_head, slot);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
+
+bool kvm_shadow_mmu_write_protect_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ u64 gfn, int min_level)
+{
+ struct kvm_rmap_head *rmap_head;
+ int i;
+ bool write_protected = false;
+
+ if (kvm_memslots_have_rmaps(kvm)) {
+ for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+ rmap_head = gfn_to_rmap(gfn, i, slot);
+ write_protected |= rmap_write_protect(rmap_head, true);
+ }
+ }
+
+ return write_protected;
+}
+
+void kvm_shadow_mmu_clear_dirty_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false);
+}
+
+void kvm_shadow_mmu_wrprot_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot,
+ int start_level)
+{
+ walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
+ start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+}
diff --git a/arch/x86/kvm/mmu/shadow_mmu.h b/arch/x86/kvm/mmu/shadow_mmu.h
--- a/arch/x86/kvm/mmu/shadow_mmu.h
+++ b/arch/x86/kvm/mmu/shadow_mmu.h
@@ -117,6 +117,21 @@ void kvm_shadow_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_shadow_mmu_has_zapped_obsolete_pages(struct kvm *kvm);
unsigned long kvm_shadow_mmu_shrink_scan(struct kvm *kvm, int pages_to_free);
+void kvm_shadow_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+void kvm_shadow_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+bool kvm_shadow_mmu_write_protect_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ u64 gfn, int min_level);
+void kvm_shadow_mmu_clear_dirty_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
+void kvm_shadow_mmu_wrprot_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot,
+ int start_level);
+
/* Exports from paging_tmpl.h */
gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gpa_t vaddr, u64 access,
There are several functions in mmu.c which bifrucate to the Shadow and/or TDP MMU implementations. In most of these, the Shadow MMU implementation is open-coded. Wrap these instances in a nice function which just needs kvm and slot arguments or similar. This matches the TDP MMU interface and will allow for some nice cleanups in a following commit. No functional change intended. Signed-off-by: Ben Gardon <bgardon@google.com> --- arch/x86/kvm/mmu/mmu.c | 52 ++++++---------------------- arch/x86/kvm/mmu/shadow_mmu.c | 64 +++++++++++++++++++++++++++++++++++ arch/x86/kvm/mmu/shadow_mmu.h | 15 ++++++++ 3 files changed, 90 insertions(+), 41 deletions(-)