diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1236,10 +1236,12 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+bool kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+bool kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_wrprot_lpage_and_clear_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5747,7 +5747,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
}

-void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+bool kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
bool flush;
@@ -5765,12 +5765,12 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
* out of mmu lock also guarantees no dirty pages will be lost in
* dirty_bitmap.
*/
- if (flush)
- kvm_flush_remote_tlbs(kvm);
+
+ return flush;
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+bool kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
bool flush;
@@ -5783,10 +5783,21 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
/* see kvm_mmu_slot_remove_write_access */
lockdep_assert_held(&kvm->slots_lock);
+ return flush;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+
+void kvm_mmu_slot_wrprot_lpage_and_clear_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot)
+{
+ bool flush;
+
+ flush = kvm_mmu_slot_leaf_clear_dirty(kvm, memslot);
+ flush |= kvm_mmu_slot_largepage_remove_write_access(kvm, memslot);

if (flush)
kvm_flush_remote_tlbs(kvm);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
+EXPORT_SYMBOL_GPL(kvm_mmu_slot_wrprot_lpage_and_clear_dirty);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -14380,8 +14380,7 @@ static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
static void vmx_slot_enable_log_dirty(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
- kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
- kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
+ kvm_mmu_slot_wrprot_lpage_and_clear_dirty(kvm, slot);
}

static void vmx_slot_disable_log_dirty(struct kvm *kvm,
Enabling dirty logging can currently invoke two remote TLB flushes, one
each from clearing D bits and write-protecting large pages. Instead, add
a wrapper that does both and then invokes a single remote TLB flush if
needed.

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/x86/include/asm/kvm_host.h |  6 ++++--
 arch/x86/kvm/mmu.c              | 21 ++++++++++++++++-----
 arch/x86/kvm/vmx.c              |  3 +--
 3 files changed, 21 insertions(+), 9 deletions(-)
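For illustration, the flush-coalescing pattern the new wrapper implements
can be sketched as a standalone C program. All names below
(clear_dirty_bits, wrprot_large_pages, flush_remote_tlbs, and the
pared-down struct kvm) are hypothetical stand-ins, not the kernel's
internal API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct kvm. */
struct kvm { int id; };

/* Each helper reports whether it changed SPTEs in a way that needs a
 * remote TLB flush, instead of issuing the flush itself. */
static bool clear_dirty_bits(struct kvm *kvm)
{
	printf("vm %d: clear D bits\n", kvm->id);
	return true;
}

static bool wrprot_large_pages(struct kvm *kvm)
{
	printf("vm %d: write-protect large pages\n", kvm->id);
	return true;
}

static void flush_remote_tlbs(struct kvm *kvm)
{
	printf("vm %d: remote TLB flush\n", kvm->id);
}

/* The wrapper ORs the two flush requirements together and issues at
 * most one remote TLB flush, mirroring
 * kvm_mmu_slot_wrprot_lpage_and_clear_dirty() above. */
static void wrprot_lpage_and_clear_dirty(struct kvm *kvm)
{
	bool flush;

	flush = clear_dirty_bits(kvm);
	flush |= wrprot_large_pages(kvm);

	if (flush)
		flush_remote_tlbs(kvm);
}

int main(void)
{
	struct kvm vm = { .id = 0 };

	wrprot_lpage_and_clear_dirty(&vm);
	return 0;
}

Note the use of |= rather than short-circuit logic: the second helper
must run unconditionally even when the first already requested a flush,
so only the flush itself is deferred and deduplicated.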