@@ -1951,6 +1951,56 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
return ret;
}
+bool kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+	struct kvm_vcpu *vcpu;
+	kvm_pfn_t pfn;
+	gfn_t gfn;
+	int idx;
+	bool ret = true;
+
+	/* Need vcpu context for kvm_mmu_do_page_fault. */
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (mutex_lock_killable(&vcpu->mutex))
+		return false;
+
+	vcpu_load(vcpu);
+	idx = srcu_read_lock(&kvm->srcu);
+
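+	/* Make sure a valid MMU root is in place before faulting in pages. */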
+	kvm_mmu_reload(vcpu);
+
+	gfn = range->start;
+	while (gfn < range->end) {
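+		/* The range may cover many pages: stay interruptible and preemptible. */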
+		if (signal_pending(current)) {
+			ret = false;
+			break;
+		}
+
+		if (need_resched())
+			cond_resched();
+
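+		/* Fault in this gfn as a user-mode write to populate the mapping. */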
+		pfn = kvm_mmu_do_page_fault(vcpu, gfn << PAGE_SHIFT,
+					    PFERR_WRITE_MASK | PFERR_USER_MASK,
+					    false);
+		if (is_error_noslot_pfn(pfn) || kvm->vm_bugged) {
+			ret = false;
+			break;
+		}
+
+		gfn++;
+	}
+
+	srcu_read_unlock(&kvm->srcu, idx);
+	vcpu_put(vcpu);
+
+	mutex_unlock(&vcpu->mutex);
+
+	return ret;
+}
+
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool flush = false;
@@ -237,6 +237,8 @@ struct kvm_gfn_range {
pte_t pte;
bool may_block;
};
+
+bool kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -471,6 +471,12 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
srcu_read_unlock(&kvm->srcu, idx);
}
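+/* Weak default for architectures that do not implement gfn range mapping. */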
+bool __weak kvm_map_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+	return false;
+}
+
typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,