diff --git a/lib/arm/asm/io.h b/lib/arm/asm/io.h
--- a/lib/arm/asm/io.h
+++ b/lib/arm/asm/io.h
@@ -95,6 +95,12 @@ static inline void *phys_to_virt(phys_addr_t x)
return (void *)__phys_to_virt(x);
}
+extern void set_memory_decrypted(unsigned long va, size_t size);
+#define set_memory_decrypted set_memory_decrypted
+
+extern void set_memory_encrypted(unsigned long va, size_t size);
+#define set_memory_encrypted set_memory_encrypted
+
#include <asm-generic/io.h>
#endif /* _ASMARM_IO_H_ */
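
The define-to-itself lines above are the usual arch-override idiom: a
generic header can test the macro with #ifndef and only supply a
fallback when the architecture has not provided one. As a minimal
sketch of a consumer under that assumption (the no-op fallbacks below
are illustrative, not part of this series):

#include <stddef.h>

/*
 * If the arch header did not define set_memory_decrypted() and
 * set_memory_encrypted() (detected via the self-referential macros),
 * fall back to no-ops, which is correct for non-confidential guests.
 */
#ifndef set_memory_decrypted
static inline void set_memory_decrypted(unsigned long va, size_t size)
{
}
#endif

#ifndef set_memory_encrypted
static inline void set_memory_encrypted(unsigned long va, size_t size)
{
}
#endif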
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -23,6 +23,7 @@
#include <linux/compiler.h>
pgd_t *mmu_idmap;
+unsigned long idmap_end;
/* Used by Realms, depends on IPA size */
unsigned long prot_ns_shared = 0;
@@ -31,6 +32,11 @@ unsigned long phys_mask_shift = 48;
/* CPU 0 starts with disabled MMU */
static cpumask_t mmu_enabled_cpumask;
+static bool is_idmap_address(phys_addr_t pa)
+{
+ return pa < idmap_end;
+}
+
bool mmu_enabled(void)
{
/*
@@ -93,12 +99,17 @@ static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
return &pte_val(*pte);
}
-static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
+static void set_pte(uintptr_t vaddr, pteval_t *p_pte, pteval_t pte)
{
- pteval_t *p_pte = get_pte(pgtable, vaddr);
-
WRITE_ONCE(*p_pte, pte);
flush_tlb_page(vaddr);
+}
+
+static pteval_t *install_pte(pgd_t *pgtable, uintptr_t vaddr, pteval_t pte)
+{
+ pteval_t *p_pte = get_pte(pgtable, vaddr);
+
+ set_pte(vaddr, p_pte, pte);
return p_pte;
}
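
The reason install_pte() is now layered on set_pte() shows up in the
next hunk: changing the output address of a live mapping (protected
alias <-> shared alias) must follow the architecture's
break-before-make sequence, i.e. invalidate the entry and flush the
TLB before writing the new one. Condensed with the helpers above
(vaddr and new_pte are placeholders):

	/* 1. Break: install an invalid PTE, which also flushes the TLB */
	ptep = install_pte(mmu_idmap, vaddr, 0);
	/* 2. Make: write the new PTE and flush again */
	set_pte(vaddr, ptep, new_pte);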
@@ -171,6 +182,39 @@ phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *virt)
((phys_addr_t)(unsigned long)virt & ~mask);
}
+/*
+ * __idmap_set_range_prot - Apply permissions to the given idmap range.
+ */
+static void __idmap_set_range_prot(unsigned long virt_offset, size_t size, pgprot_t prot)
+{
+ pteval_t *ptep;
+ pteval_t default_prot = PTE_TYPE_PAGE | PTE_AF | PTE_SHARED;
+
+ while (size > 0) {
+ pteval_t pte = virt_offset | default_prot | pgprot_val(prot);
+
+ if (!is_idmap_address(virt_offset))
+ break;
+ /* Break-before-make: clear the old PTE entry first */
+ ptep = install_pte(mmu_idmap, (uintptr_t)virt_offset, 0);
+ /* Now apply the changes */
+ set_pte((uintptr_t)virt_offset, ptep, pte);
+
+ size -= PAGE_SIZE;
+ virt_offset += PAGE_SIZE;
+ }
+}
+
+static void idmap_set_range_shared(unsigned long virt_offset, size_t size)
+{
+ __idmap_set_range_prot(virt_offset, size, __pgprot(PTE_WBWA | PTE_USER | PTE_NS_SHARED));
+}
+
+static void idmap_set_range_protected(unsigned long virt_offset, size_t size)
+{
+ __idmap_set_range_prot(virt_offset, size, __pgprot(PTE_WBWA | PTE_USER));
+}
+
void mmu_set_range_ptes(pgd_t *pgtable, uintptr_t virt_offset,
phys_addr_t phys_start, phys_addr_t phys_end,
pgprot_t prot)
@@ -210,11 +254,12 @@ void mmu_set_range_sect(pgd_t *pgtable, uintptr_t virt_offset,
void *setup_mmu(phys_addr_t phys_end, void *unused)
{
struct mem_region *r;
+ unsigned long end = 0;
/* 3G-4G region is reserved for vmalloc, cap phys_end at 3G */
if (phys_end > (3ul << 30))
phys_end = 3ul << 30;
-
+ end = phys_end;
#ifdef __aarch64__
init_alloc_vpage((void*)(4ul << 30));
@@ -236,9 +281,12 @@ void *setup_mmu(phys_addr_t phys_end, void *unused)
mmu_set_range_ptes(mmu_idmap, r->start, r->start, r->end,
__pgprot(PTE_WBWA | PTE_USER));
}
+ if (r->end > end)
+ end = r->end;
}
mmu_enable(mmu_idmap);
+ idmap_end = end;
return mmu_idmap;
}
@@ -295,3 +343,19 @@ void mmu_clear_user(pgd_t *pgtable, unsigned long vaddr)
flush_tlb_page(vaddr);
}
}
+
+void set_memory_encrypted(unsigned long va, size_t size)
+{
+ if (is_realm()) {
+ arm_set_memory_protected(__virt_to_phys(va), size);
+ idmap_set_range_protected(va, size);
+ }
+}
+
+void set_memory_decrypted(unsigned long va, size_t size)
+{
+ if (is_realm()) {
+ arm_set_memory_shared(__virt_to_phys(va), size);
+ idmap_set_range_shared(va, size);
+ }
+}
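
For background, PTE_NS_SHARED works because a Realm's IPA space is
split in two halves, with the top IPA bit selecting the unprotected
alias that the host is allowed to access. A sketch of how
prot_ns_shared (declared above as "depends on IPA size") could be
derived; the function and its ipa_width parameter are assumptions for
illustration:

/*
 * Illustration only: with an N-bit IPA space, addresses with bit N-1
 * clear are protected and addresses with bit N-1 set are the
 * unprotected aliases, so OR-ing this value into a PTE's output
 * address (via PTE_NS_SHARED) remaps the page to its shared alias.
 */
static void realm_init_ns_shared(unsigned long ipa_width)
{
	prot_ns_shared = 1UL << (ipa_width - 1);
}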
diff --git a/lib/arm64/asm/io.h b/lib/arm64/asm/io.h
--- a/lib/arm64/asm/io.h
+++ b/lib/arm64/asm/io.h
@@ -89,6 +89,12 @@ static inline void *phys_to_virt(phys_addr_t x)
return (void *)__phys_to_virt(x);
}
+extern void set_memory_decrypted(unsigned long va, size_t size);
+#define set_memory_decrypted set_memory_decrypted
+
+extern void set_memory_encrypted(unsigned long va, size_t size);
+#define set_memory_encrypted set_memory_encrypted
+
#include <asm-generic/io.h>
#endif /* _ASMARM64_IO_H_ */
Enable memory encryption support for Realms. When a page is
"decrypted", we set the RIPAS to EMPTY, hinting to the hypervisor that
it could reclaim the page backing the IPA. The pagetable is also
updated with the PTE_NS_SHARED attribute, which in effect turns the
"ipa" into the unprotected alias. Similarly, for "encryption" we mark
the IPA back to RIPAS_RAM and clear the PTE_NS_SHARED attribute. The
addresses passed into the helpers must be idmap/linear map addresses.

Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
---
 lib/arm/asm/io.h   |  6 ++++
 lib/arm/mmu.c      | 72 +++++++++++++++++++++++++++++++++++++++++++---
 lib/arm64/asm/io.h |  6 ++++
 3 files changed, 80 insertions(+), 4 deletions(-)
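
For illustration, a sketch of how a test might use the new helpers,
assuming kvm-unit-tests' page allocator (alloc_page()/free_page());
the function name and flow are mine, not part of this series:

#include <alloc_page.h>
#include <asm/io.h>
#include <asm/page.h>

/*
 * Share one idmap page with the host, then take it back. The helpers
 * require idmap/linear map addresses, which alloc_page() provides.
 */
static void share_page_with_host(void)
{
	void *buf = alloc_page();

	/* RIPAS -> EMPTY, PTE gains PTE_NS_SHARED: host-visible alias */
	set_memory_decrypted((unsigned long)buf, PAGE_SIZE);

	/* ... exchange data with the host here ... */

	/* RIPAS -> RAM, PTE_NS_SHARED cleared: back to protected memory */
	set_memory_encrypted((unsigned long)buf, PAGE_SIZE);

	free_page(buf);
}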