@@ -217,6 +217,11 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int __kvm_mmu_map_gpa(struct kvm *kvm, gfn_t *startp, gfn_t end,
+ bool map_private);
+int kvm_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t *startp, gfn_t end,
+ bool map_private);
+
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
@@ -6743,6 +6743,91 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
}
}
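+/*
+ * Convert the GPA range [*startp, end) to be mapped as private or shared.
+ * The existing mappings of the opposite type are zapped and the new
+ * attribute is recorded.  On -EAGAIN, *startp is updated to the first
+ * GFN that has not been converted yet so the caller can resume from there.
+ */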
+int __kvm_mmu_map_gpa(struct kvm *kvm, gfn_t *startp, gfn_t end,
+ bool map_private)
+{
+ gfn_t start = *startp;
+ int attr;
+ int ret;
+
+ if (!kvm_gfn_shared_mask(kvm))
+ return -EOPNOTSUPP;
+
+ attr = map_private ? KVM_MEM_ATTR_PRIVATE : KVM_MEM_ATTR_SHARED;
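+	/* Strip the shared-GPA alias bit; attributes are tracked per raw GFN. */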
+	start &= ~kvm_gfn_shared_mask(kvm);
+	end &= ~kvm_gfn_shared_mask(kvm);
+
+	/*
+	 * Reserve the memory attribute entries up front so that the
+	 * kvm_vm_set_mem_attr() calls below can succeed while holding
+	 * mmu_lock, where memory allocation is not allowed.
+	 */
+ ret = kvm_vm_reserve_mem_attr(kvm, start, end);
+ if (ret)
+ return ret;
+
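+	/*
+	 * Zap the old mappings and update the attributes under mmu_lock held
+	 * for write so that the conversion appears atomic to concurrent
+	 * faults.
+	 */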
+ write_lock(&kvm->mmu_lock);
+ if (is_tdp_mmu_enabled(kvm)) {
+ gfn_t s = start;
+
+ ret = kvm_tdp_mmu_map_gpa(kvm, &s, end, map_private);
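+		/*
+		 * On success, record the new attribute for the whole range.
+		 * On -EAGAIN, only [start, s) has been converted: record that
+		 * prefix and report the resume point back to the caller.
+		 */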
+ if (!ret) {
+ KVM_BUG_ON(kvm_vm_set_mem_attr(kvm, attr, start, end), kvm);
+ } else if (ret == -EAGAIN) {
+ KVM_BUG_ON(kvm_vm_set_mem_attr(kvm, attr, start, s), kvm);
+ start = s;
+ }
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+ write_unlock(&kvm->mmu_lock);
+
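+	/*
+	 * Re-add the alias bit matching the requested mapping type so that
+	 * *startp is a valid GPA to resume from.
+	 */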
+ if (ret == -EAGAIN) {
+ if (map_private)
+ *startp = kvm_gfn_private(kvm, start);
+ else
+ *startp = kvm_gfn_shared(kvm, start);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kvm_mmu_map_gpa);
+
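+/*
+ * Per-vCPU wrapper: the conversion requires both the shared and the private
+ * root to be initialized.
+ */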
+int kvm_mmu_map_gpa(struct kvm_vcpu *vcpu, gfn_t *startp, gfn_t end,
+ bool map_private)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+
+ if (!VALID_PAGE(mmu->root.hpa) || !VALID_PAGE(mmu->private_root_hpa))
+ return -EINVAL;
+
+ return __kvm_mmu_map_gpa(vcpu->kvm, startp, end, map_private);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_gpa);
+
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -2111,6 +2111,53 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
return spte_set;
}
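+/*
+ * Zap the leaf SPTEs of [*startp, end) from all roots whose type (private vs
+ * shared) differs from the requested mapping so that subsequent faults
+ * establish the range with the requested type.
+ */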
+int kvm_tdp_mmu_map_gpa(struct kvm *kvm,
+ gfn_t *startp, gfn_t end, bool map_private)
+{
+ struct kvm_mmu_page *root;
+ gfn_t start = *startp;
+ bool flush = false;
+ int i;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ KVM_BUG_ON(start & kvm_gfn_shared_mask(kvm), kvm);
+ KVM_BUG_ON(end & kvm_gfn_shared_mask(kvm), kvm);
+
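+	/*
+	 * Open an invalidation window so that page faults racing with the
+	 * zap of this range are retried.
+	 */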
+ kvm_mmu_invalidate_begin(kvm, start, end);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ for_each_tdp_mmu_root_yield_safe(kvm, root, i) {
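+			/*
+			 * Roots that already match the requested type are
+			 * left intact; only the opposite type is zapped.
+			 */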
+ if (is_private_sp(root) == map_private)
+ continue;
+
+			/*
+			 * TODO: If necessary, return -EAGAIN to the caller
+			 * instead of yielding and resuming within
+			 * tdp_mmu_zap_leafs().
+			 */
+ flush = tdp_mmu_zap_leafs(kvm, root, start, end,
+ /*can_yield=*/true, flush,
+ /*zap_private=*/is_private_sp(root));
+ }
+ }
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, start, end - start);
+ kvm_mmu_invalidate_end(kvm, start, end);
+
+ return 0;
+}
+
/*
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
@@ -51,6 +51,9 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
gfn_t start, gfn_t end,
int target_level, bool shared);
+int kvm_tdp_mmu_map_gpa(struct kvm *kvm,
+ gfn_t *startp, gfn_t end, bool map_private);
+
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
rcu_read_lock();