@@ -6554,6 +6554,9 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
for_each_rmap_spte(rmap_head, &iter, sptep) {
sp = sptep_to_sp(sptep);
+ /* Private page dirty logging is not supported yet. */
+ KVM_BUG_ON(is_private_sptep(sptep), kvm);
+
/*
* We cannot do huge page mapping for indirect shadow pages,
* which are found on the last rmap (level = 1) when not using
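
For reference, a minimal sketch of how a private-SPTE check such as
is_private_sptep() could be shaped, assuming a per-shadow-page private
role bit (illustrative only, not the authoritative KVM/TDX definitions):

static inline bool is_private_sp(const struct kvm_mmu_page *sp)
{
	/* Assumes an is_private bit in the page role (hypothetical). */
	return sp->role.is_private;
}

static inline bool is_private_sptep(u64 *sptep)
{
	/* A private SPTE is one that lives in a private shadow page. */
	return is_private_sp(sptep_to_sp(sptep));
}
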
@@ -1420,7 +1420,8 @@ typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
struct kvm_gfn_range *range,
- tdp_handler_t handler)
+ tdp_handler_t handler,
+ bool only_shared)
{
struct kvm_mmu_page *root;
struct tdp_iter iter;
@@ -1431,9 +1432,23 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
* into this helper allow blocking; it'd be dead, wasteful code.
*/
for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
+ gfn_t start;
+ gfn_t end;
+
+ if (only_shared && is_private_sp(root))
+ continue;
+
rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
+ /*
+ * For TDX shared mappings, set the GFN shared bit on the range so
+ * that each handler() doesn't need to set it itself, avoiding
+ * duplicated code across the handlers.
+ */
+ start = kvm_gfn_for_root(kvm, root, range->start);
+ end = kvm_gfn_for_root(kvm, root, range->end);
+
+ tdp_root_for_each_leaf_pte(iter, root, start, end)
ret |= handler(kvm, &iter, range);
rcu_read_unlock();
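
One plausible shape for kvm_gfn_for_root(), assuming a
kvm_gfn_shared_mask() helper that returns the TDX shared-GPA bit as a
GFN mask (zero for non-TDX VMs); a sketch under those assumptions, not
the authoritative implementation:

static inline gfn_t kvm_gfn_for_root(struct kvm *kvm,
				     struct kvm_mmu_page *root, gfn_t gfn)
{
	/* Strip the shared bit; the caller may pass either alias. */
	gfn_t gfn_for_root = gfn & ~kvm_gfn_shared_mask(kvm);

	/* Shared roots map the GPA range with the shared bit set. */
	if (!is_private_sp(root))
		gfn_for_root |= kvm_gfn_shared_mask(kvm);
	return gfn_for_root;
}

For a non-TDX VM the mask is zero, so the function degenerates to the
identity and pre-existing behavior is unchanged.
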
@@ -1477,7 +1492,12 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
+ /*
+ * The first TDX generation doesn't support clearing the A bit for
+ * private mappings, since there is no secure EPT API for it; it is
+ * nevertheless a legitimate request for a TDX guest.
+ */
+ return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range, true);
}
static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
@@ -1488,7 +1508,8 @@ static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
+ /* The first TDX generation doesn't support the A bit. */
+ return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn, true);
}
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
@@ -1533,8 +1554,11 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
* No need to handle the remote TLB flush under RCU protection, the
* target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
* shadow page. See the WARN on pfn_changed in __handle_changed_spte().
+ *
+ * The .change_pte() callback should never fire for a private page,
+ * because for now TDX private pages are pinned for the VM's lifetime.
*/
- return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
+ return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn, true);
}
/*
@@ -1588,6 +1612,14 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
+ /*
+ * The first TDX generation doesn't support write protecting private
+ * mappings, so kvm_arch_dirty_log_supported(kvm) is false for a TDX
+ * guest; it is a bug to reach here for a guest TD.
+ */
+ if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
+ return false;
+
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
@@ -1853,6 +1885,14 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
+ /*
+ * The first TDX generation doesn't support clearing the dirty bit,
+ * since there is no secure EPT API for it. It is a bug to reach
+ * here for a TDX guest.
+ */
+ if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
+ return false;
+
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
@@ -1919,6 +1959,13 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_mmu_page *root;
lockdep_assert_held_write(&kvm->mmu_lock);
+ /*
+ * The first TDX generation doesn't support clearing the dirty bit,
+ * since there is no secure EPT API for it. For now, silently
+ * ignore KVM_CLEAR_DIRTY_LOG.
+ */
+ if (!kvm_arch_dirty_log_supported(kvm))
+ return;
for_each_tdp_mmu_root(kvm, root, slot->as_id)
clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}
@@ -1985,6 +2032,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
lockdep_assert_held_read(&kvm->mmu_lock);
+ /*
+ * This should only be reachable when dirty logging is supported;
+ * it is a bug to reach here otherwise.
+ */
+ if (WARN_ON_ONCE(!kvm_arch_dirty_log_supported(kvm)))
+ return;
+
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
zap_collapsible_spte_range(kvm, root, slot);
}
@@ -2038,6 +2092,15 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
bool spte_set = false;
lockdep_assert_held_write(&kvm->mmu_lock);
+
+ /*
+ * The first TDX generation doesn't support write protecting private
+ * mappings; silently ignore the request. KVM_GET_DIRTY_LOG etc. can
+ * legitimately reach here, so don't warn.
+ */
+ if (!kvm_arch_dirty_log_supported(kvm))
+ return false;
+
for_each_tdp_mmu_root(kvm, root, slot->as_id)
spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
@@ -12621,6 +12621,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
u32 new_flags = new ? new->flags : 0;
bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
+ if (!kvm_arch_dirty_log_supported(kvm) && log_dirty_pages)
+ return;
+
/*
* Update CPU dirty logging if dirty logging is being toggled. This
* applies to all operations.
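
All of the guards above hinge on kvm_arch_dirty_log_supported(); a
minimal sketch of it, assuming a vm_type field and a KVM_X86_TDX_VM
type (hypothetical names, for illustration only):

bool kvm_arch_dirty_log_supported(struct kvm *kvm)
{
	/*
	 * Dirty logging needs write protection and dirty-bit clearing,
	 * neither of which the first TDX generation's secure EPT
	 * provides for private mappings (assumed vm_type check).
	 */
	return kvm->arch.vm_type != KVM_X86_TDX_VM;
}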