@@ -2829,6 +2829,31 @@ static bool zap_direct_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 	return direct_walk_iterator_end_traversal(&iter);
 }
 
+/*
+ * kvm_handle_direct_hva_range() callback: zap the direct MMU paging
+ * structure entries backing [start, end) in @slot.  @data is unused.
+ */
+static int zap_direct_gfn_range_handler(struct kvm *kvm,
+					struct kvm_memory_slot *slot,
+					gfn_t start, gfn_t end,
+					unsigned long data)
+{
+	return zap_direct_gfn_range(kvm, slot->as_id, start, end,
+				    MMU_WRITE_LOCK);
+}
+
+/*
+ * Zap direct MMU entries for the GFN ranges backed by HVAs in
+ * [start, end).  Returns non-zero if a TLB flush is needed.  Return
+ * int, not bool, to match kvm_handle_hva_range() so callers can
+ * accumulate handler return codes without truncation.
+ */
+static int zap_direct_hva_range(struct kvm *kvm, unsigned long start,
+				unsigned long end)
+{
+	return kvm_handle_direct_hva_range(kvm, start, end, 0,
+					   zap_direct_gfn_range_handler);
+}
+
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 			  unsigned long data,
 			  int (*handler)(struct kvm *kvm,
@@ -2842,7 +2858,19 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+	int r = 0;
+
+	/*
+	 * Zap through the direct MMU and/or the shadow MMU rmaps,
+	 * OR-ing together each path's "TLB flush needed" result.
+	 * NOTE(review): assumes pure_direct_mmu implies
+	 * direct_mmu_enabled — confirm at the flag's setting site.
+	 */
+	if (kvm->arch.direct_mmu_enabled)
+		r |= zap_direct_hva_range(kvm, start, end);
+	if (!kvm->arch.pure_direct_mmu)
+		r |= kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+	return r;
 }
 
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
Implement arch-specific handler functions for the invalidation MMU notifiers, using a paging structure iterator. These handlers are responsible for zapping paging structure entries so that the primary MM can safely remap memory that was used to back guest memory. Signed-off-by: Ben Gardon <bgardon@google.com> --- arch/x86/kvm/mmu.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-)