diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
@@ -434,9 +434,10 @@
#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
-#define IOMMU_PROT_MASK 0x03
-#define IOMMU_PROT_IR 0x01
-#define IOMMU_PROT_IW 0x02
+#define IOMMU_PROT_MASK 0x07
+#define IOMMU_PROT_IR 0x01
+#define IOMMU_PROT_IW 0x02
+#define IOMMU_PROT_MMIO 0x04
#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
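With IOMMU_PROT_MASK widened from 0x03 to 0x07, the new bit composes with the existing read/write bits. A minimal sketch of building and validating a prot value against the widened mask (the -EINVAL check is illustrative only, not part of this patch):

    int prot = IOMMU_PROT_IR | IOMMU_PROT_IW | IOMMU_PROT_MMIO; /* == 0x07 */

    if (prot & ~IOMMU_PROT_MASK)    /* reject bits outside the mask */
            return -EINVAL;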
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
@@ -373,6 +373,8 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
__pte |= IOMMU_PTE_IR;
if (prot & IOMMU_PROT_IW)
__pte |= IOMMU_PTE_IW;
+ if (prot & IOMMU_PROT_MMIO)
+ __pte = __sme_clr(__pte);
for (i = 0; i < count; ++i)
pte[i] = __pte;
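For context, __sme_set() and __sme_clr() simply OR in or mask out sme_me_mask, the SME C-bit position probed at boot, roughly as in include/linux/mem_encrypt.h:

    #define __sme_set(x)    ((x) | sme_me_mask)
    #define __sme_clr(x)    ((x) & ~sme_me_mask)

Clearing it here leaves the peer device's raw MMIO physical address in the PTE, so P2P transactions target the BAR rather than an address with the encryption bit set, which no device would claim.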
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
@@ -65,7 +65,10 @@ static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
u64 pte;
- pte = __sme_set(paddr & PM_ADDR_MASK);
+ pte = paddr & PM_ADDR_MASK;
+ if (!(prot & IOMMU_PROT_MMIO))
+ pte = __sme_set(pte);
+
pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
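The v2 page table path gets the mirror-image fix: __sme_set() is applied only when the mapping is not MMIO. A sketch of the two resulting PTE address fields (variable names illustrative):

    u64 pte_ram  = __sme_set(paddr & PM_ADDR_MASK); /* paddr | sme_me_mask */
    u64 pte_mmio = paddr & PM_ADDR_MASK;            /* encryption bit clear */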
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
@@ -2578,6 +2578,8 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
prot |= IOMMU_PROT_IR;
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
+ if (iommu_prot & IOMMU_MMIO)
+ prot |= IOMMU_PROT_MMIO;
if (ops->map_pages) {
ret = ops->map_pages(ops, iova, paddr, pgsize,
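At the iommu core boundary, a caller mapping a peer device's BAR for P2P would now pass IOMMU_MMIO alongside the usual flags. A hedged usage sketch (domain, iova, bar_phys and dev are assumed to come from the caller's context; the gfp argument matches the current iommu_map() signature, older kernels omit it):

    /* Map a peer device's BAR into this domain without the SME C-bit. */
    ret = iommu_map(domain, iova, bar_phys, SZ_4K,
                    IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO, GFP_KERNEL);
    if (ret)
            dev_err(dev, "P2P MMIO mapping failed: %d\n", ret);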
When SME is enabled, the memory encryption bit is set in each IOMMU
page table PTE, which works fine as long as the pfn behind the PTE is
ordinary memory. However, if the pfn is an MMIO address, for example
when another device's MMIO space is mapped into this device's IO page
table, setting the memory encryption bit in the PTE breaks peer-to-peer
(P2P) DMA.

Clear the memory encryption bit in the IO page table when the mapping
is MMIO rather than memory.

Signed-off-by: Wencheng Yang <east.moutain.yang@gmail.com>
---
 drivers/iommu/amd/amd_iommu_types.h | 7 ++++---
 drivers/iommu/amd/io_pgtable.c      | 2 ++
 drivers/iommu/amd/io_pgtable_v2.c   | 5 ++++-
 drivers/iommu/amd/iommu.c           | 2 ++
 4 files changed, 12 insertions(+), 4 deletions(-)