@@ -696,6 +696,7 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_shadow_page_cache;
struct kvm_mmu_memory_cache mmu_gfn_array_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;
+ struct kvm_mmu_memory_cache mmu_private_sp_cache;
/*
* QEMU userspace and the guest each have their own FPU state.
@@ -716,6 +716,13 @@ static int mmu_topup_shadow_page_cache(struct kvm_vcpu *vcpu)
int start, end, i, r;
bool is_tdp_mmu = is_tdp_mmu_enabled(vcpu->kvm);
+ if (kvm_gfn_shared_mask(vcpu->kvm)) {
+ r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_private_sp_cache,
+ PT64_ROOT_MAX_LEVEL);
+ if (r)
+ return r;
+ }
+
if (is_tdp_mmu && shadow_nonpresent_value)
start = kvm_mmu_memory_cache_nr_free_objects(mc);
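
For context on the hunk above: the per-vCPU kvm_mmu_memory_cache objects are filled outside mmu_lock and consumed while it is held, so a fixed worst-case reservation of one page per paging level (PT64_ROOT_MAX_LEVEL) is enough to guarantee later allocations succeed. A minimal sketch of that top-up/consume pattern; the example_* names are hypothetical, but the cache API calls are the real ones used by the patch:

/* Illustrative only: the top-up/consume pattern behind the new cache. */
static int example_topup_private_sp(struct kvm_vcpu *vcpu)
{
	/*
	 * Called before taking mmu_lock; reserves one page per paging
	 * level so allocations under the lock cannot fail.
	 */
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_private_sp_cache,
					  PT64_ROOT_MAX_LEVEL);
}

static void *example_take_private_sp(struct kvm_vcpu *vcpu)
{
	/* Called under mmu_lock; draws from the reserved objects. */
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_private_sp_cache);
}
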
@@ -757,6 +764,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_private_sp_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}
@@ -1759,6 +1767,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
if (!direct)
sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+ kvm_mmu_init_private_sp(sp);
/*
* active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
@@ -55,6 +55,10 @@ struct kvm_mmu_page {
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
+#ifdef CONFIG_KVM_MMU_PRIVATE
+ /* associated private shadow page, e.g. SEPT page. */
+ void *private_sp;
+#endif
/* Currently serving as active root */
union {
int root_count;
@@ -115,6 +119,86 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
return kvm_mmu_role_as_id(sp->role);
}
+/*
+ * The TDX vcpu code allocates the root Secure EPT page and assigns it to
+ * the CPU's Secure EPT pointer, so KVM needn't allocate and link one here.
+ * Use a dummy value so that is_private_sp() still returns true for it.
+ */
+#define KVM_MMU_PRIVATE_SP_ROOT ((void *)1)
+
+#ifdef CONFIG_KVM_MMU_PRIVATE
+static inline bool is_private_sp(struct kvm_mmu_page *sp)
+{
+ return !!sp->private_sp;
+}
+
+static inline bool is_private_sptep(u64 *sptep)
+{
+ WARN_ON(!sptep);
+ return is_private_sp(sptep_to_sp(sptep));
+}
+
+static inline void *kvm_mmu_private_sp(struct kvm_mmu_page *sp)
+{
+ return sp->private_sp;
+}
+
+static inline void kvm_mmu_init_private_sp(struct kvm_mmu_page *sp)
+{
+ sp->private_sp = NULL;
+}
+
+/* Valid sp->role.level is required. */
+static inline void kvm_mmu_alloc_private_sp(
+ struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, bool is_root)
+{
+ if (is_root)
+ sp->private_sp = KVM_MMU_PRIVATE_SP_ROOT;
+ else
+ sp->private_sp = kvm_mmu_memory_cache_alloc(
+ &vcpu->arch.mmu_private_sp_cache);
+	/*
+	 * Because mmu_private_sp_cache is topped up before KVM starts
+	 * resolving a page fault, the allocation above shouldn't fail.
+	 */
+ WARN_ON_ONCE(!sp->private_sp);
+}
+
+static inline void kvm_mmu_free_private_sp(struct kvm_mmu_page *sp)
+{
+ if (sp->private_sp != KVM_MMU_PRIVATE_SP_ROOT)
+ free_page((unsigned long)sp->private_sp);
+}
+#else
+static inline bool is_private_sp(struct kvm_mmu_page *sp)
+{
+ return false;
+}
+
+static inline bool is_private_sptep(u64 *sptep)
+{
+ return false;
+}
+
+static inline void *kvm_mmu_private_sp(struct kvm_mmu_page *sp)
+{
+ return NULL;
+}
+
+static inline void kvm_mmu_init_private_sp(struct kvm_mmu_page *sp)
+{
+}
+
+static inline void kvm_mmu_alloc_private_sp(
+ struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, bool is_root)
+{
+}
+
+static inline void kvm_mmu_free_private_sp(struct kvm_mmu_page *sp)
+{
+}
+#endif
+
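
A hypothetical caller, sketching the lifecycle these helpers are meant to support; example_private_sp_lifecycle() is not part of the patch, only the helpers it calls are:

/*
 * Zero the field at shadow-page creation, attach a Secure EPT page when
 * the mapping is private, and release it at teardown.  With
 * CONFIG_KVM_MMU_PRIVATE=n every call below compiles away.
 */
static void example_private_sp_lifecycle(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp,
					 bool is_root)
{
	kvm_mmu_init_private_sp(sp);		/* private_sp = NULL */

	kvm_mmu_alloc_private_sp(vcpu, sp, is_root);

	if (is_private_sp(sp))			/* true once private_sp is set */
		kvm_mmu_free_private_sp(sp);	/* no-op for the root dummy */
}
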
static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
/*
@@ -72,6 +72,8 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
+ if (is_private_sp(sp))
+ kvm_mmu_free_private_sp(sp);
free_page((unsigned long)sp->spt);
kmem_cache_free(mmu_page_header_cache, sp);
}
@@ -295,6 +297,7 @@ static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
sp->gfn = gfn;
sp->ptep = sptep;
sp->tdp_mmu_page = true;
+ kvm_mmu_init_private_sp(sp);
trace_kvm_mmu_get_page(sp, true);
}