@@ -151,6 +151,10 @@ u64 tdh_mem_page_remove(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx);
u64 tdh_phymem_cache_wb(bool resume);
u64 tdh_phymem_page_wbinvd_tdr(u64 tdr);
u64 tdh_phymem_page_wbinvd_hkid(u64 hpa, u64 hkid);
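+/* SEAMCALL wrappers to map, block and unmap MMIO pages in the S-EPT */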
+u64 tdh_mmio_map(u64 tdr, u64 gpa, u64 level, u64 hpa, u64 *rcx, u64 *rdx);
+u64 tdh_mmio_block(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx);
+u64 tdh_mmio_unmap(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx);
#else
static inline void tdx_init(void) { }
static inline int tdx_cpu_enable(void) { return -ENODEV; }
@@ -1576,6 +1576,34 @@ static int tdx_mem_page_aug(struct kvm *kvm, gfn_t gfn,
return 0;
}

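+/*
+ * Map the MMIO page at @pfn into the TD's private EPT at @gfn with
+ * TDH.MMIO.MAP.  A busy S-EPT entry is reported to the caller as
+ * -EBUSY so that the mapping can be retried.
+ */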
+static int tdx_mmio_map(struct kvm *kvm, gfn_t gfn,
+ enum pg_level level, kvm_pfn_t pfn)
+{
+ int tdx_level = pg_level_to_tdx_sept_level(level);
+ struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
+ hpa_t hpa = pfn_to_hpa(pfn);
+ gpa_t gpa = gfn_to_gpa(gfn);
+ u64 entry, level_state;
+ u64 err;
+
+ err = tdh_mmio_map(kvm_tdx->tdr_pa, gpa, tdx_level, hpa,
+ &entry, &level_state);
+	if (unlikely(err == TDX_ERROR_SEPT_BUSY))
+ return -EBUSY;
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MMIO_MAP, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*
* KVM_TDX_INIT_MEM_REGION calls kvm_gmem_populate() to get guest pages and
* tdx_gmem_post_populate() to premap page table pages into private EPT.
@@ -1610,6 +1638,10 @@ int tdx_sept_set_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(level != PG_LEVEL_4K, kvm))
	return -EINVAL;

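+	/* MMIO pfns are mapped via TDH.MMIO.MAP, not TDH.MEM.PAGE.AUG. */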
+ if (kvm_is_mmio_pfn(pfn))
+ return tdx_mmio_map(kvm, gfn, level, pfn);
+
/*
* Because guest_memfd doesn't support page migration with
* a_ops->migrate_folio (yet), no callback is triggered for KVM on page
@@ -1647,6 +1679,24 @@ static int tdx_sept_drop_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(!is_hkid_assigned(kvm_tdx), kvm))
	return -EINVAL;

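+	/*
+	 * MMIO mappings are removed with TDH.MMIO.UNMAP, retrying while
+	 * the TDX module reports the S-EPT entry busy.
+	 */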
+ if (kvm_is_mmio_pfn(pfn)) {
+ do {
+ err = tdh_mmio_unmap(kvm_tdx->tdr_pa, gpa, tdx_level,
+ &entry, &level_state);
+ } while (unlikely(err == TDX_ERROR_SEPT_BUSY));
+
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MMIO_UNMAP, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+ }
+
do {
/*
* When zapping private page, write lock is held. So no race
@@ -1715,7 +1765,7 @@ int tdx_sept_link_private_spt(struct kvm *kvm, gfn_t gfn,
}

static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
- enum pg_level level)
+ enum pg_level level, kvm_pfn_t pfn)
{
int tdx_level = pg_level_to_tdx_sept_level(level);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
@@ -1725,6 +1775,23 @@ static int tdx_sept_zap_private_spte(struct kvm *kvm, gfn_t gfn,
/* For now large page isn't supported yet. */
	WARN_ON_ONCE(level != PG_LEVEL_4K);

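+	/*
+	 * MMIO mappings are blocked with TDH.MMIO.BLOCK rather than
+	 * TDH.MEM.RANGE.BLOCK.
+	 */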
+ if (kvm_is_mmio_pfn(pfn)) {
+ err = tdh_mmio_block(kvm_tdx->tdr_pa, gpa, tdx_level,
+ &entry, &level_state);
+ if (unlikely(err == TDX_ERROR_SEPT_BUSY))
+ return -EAGAIN;
+ if (KVM_BUG_ON(err, kvm)) {
+ pr_tdx_error_2(TDH_MMIO_BLOCK, err, entry, level_state);
+ return -EIO;
+ }
+
+ return 0;
+ }
+
err = tdh_mem_range_block(kvm_tdx->tdr_pa, gpa, tdx_level, &entry, &level_state);
if (unlikely(err == TDX_ERROR_SEPT_BUSY))
return -EAGAIN;
@@ -1816,7 +1883,7 @@ int tdx_sept_remove_private_spte(struct kvm *kvm, gfn_t gfn,
if (KVM_BUG_ON(!is_hkid_assigned(to_kvm_tdx(kvm)), kvm))
	return -EINVAL;

- ret = tdx_sept_zap_private_spte(kvm, gfn, level);
+ ret = tdx_sept_zap_private_spte(kvm, gfn, level, pfn);
if (ret)
		return ret;

@@ -1951,3 +1951,68 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hpa, u64 hkid)
return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}
EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
+
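+/*
+ * TDH.MMIO.MAP: map the MMIO page at @hpa into the TD's private EPT at
+ * @gpa with mapping level @level.  On return, @rcx and @rdx carry the
+ * S-EPT entry and level state for error reporting.
+ */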
+u64 tdh_mmio_map(u64 tdr, u64 gpa, u64 level, u64 hpa, u64 *rcx, u64 *rdx)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdr,
+ .r8 = hpa,
+ };
+ u64 ret;
+
+ ret = tdx_seamcall_sept(TDH_MMIO_MAP, &args);
+
+ *rcx = args.rcx;
+ *rdx = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mmio_map);
+
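+/*
+ * TDH.MMIO.BLOCK: block the S-EPT entry for the MMIO mapping at
+ * @gpa/@level ahead of unmapping it.
+ */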
+u64 tdh_mmio_block(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdr,
+ };
+ u64 ret;
+
+ ret = tdx_seamcall_sept(TDH_MMIO_BLOCK, &args);
+
+ *rcx = args.rcx;
+ *rdx = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mmio_block);
+
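+/*
+ * TDH.MMIO.UNMAP: remove the S-EPT entry for the MMIO mapping at
+ * @gpa/@level.
+ */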
+u64 tdh_mmio_unmap(u64 tdr, u64 gpa, u64 level, u64 *rcx, u64 *rdx)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdr,
+ };
+ u64 ret;
+
+ ret = tdx_seamcall_sept(TDH_MMIO_UNMAP, &args);
+
+ *rcx = args.rcx;
+ *rdx = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mmio_unmap);
@@ -49,6 +49,9 @@
#define TDH_VP_WR 43
#define TDH_PHYMEM_PAGE_WBINVD 41
#define TDH_SYS_CONFIG 45
+#define TDH_MMIO_MAP 158
+#define TDH_MMIO_BLOCK 159
+#define TDH_MMIO_UNMAP 160

/*
* SEAMCALL leaf: