[kvm-unit-tests,03/11] arm/arm64: fix virt_to_phys

Message ID 20180116185312.7257-4-drjones@redhat.com (mailing list archive)
State New, archived

Commit Message

Andrew Jones Jan. 16, 2018, 6:53 p.m. UTC
Since switching to the vm_memalign() allocator, virt_to_phys() hasn't
been returning the correct address, as it was assuming an identity map.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/arm/asm/page.h      |  8 +++-----
 lib/arm/asm/pgtable.h   | 16 ++++++++++++----
 lib/arm/mmu.c           | 20 ++++++++++++++++++++
 lib/arm64/asm/page.h    |  8 +++-----
 lib/arm64/asm/pgtable.h | 12 ++++++++++--
 5 files changed, 48 insertions(+), 16 deletions(-)
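
For context (an editorial sketch, not part of the patch): after the switch to
the vm_memalign() allocator, a buffer can live at a virtual address that
differs from its physical one, so the old cast-only __pa() simply returned the
virtual address unchanged, which is wrong whenever the mapping is not an
identity mapping. The snippet below illustrates the intended behavior after
this fix; memalign(), printf(), and the header names are assumptions taken
from the kvm-unit-tests tree, not something this patch adds.

#include <libcflat.h>
#include <alloc.h>
#include <asm/page.h>

static void buffer_example(void)
{
	/* vm_memalign() may return a non-identity-mapped virtual address. */
	void *buf = memalign(PAGE_SIZE, PAGE_SIZE);

	/*
	 * With this patch, __pa()/__virt_to_phys() walk the current page
	 * tables (via virt_to_pte_phys()) when the MMU is enabled, so pa
	 * may legitimately differ from (unsigned long)buf. The old
	 * cast-only definition just returned the pointer value.
	 */
	phys_addr_t pa = __pa(buf);

	printf("va=%p pa=0x%llx\n", buf, (unsigned long long)pa);
}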

Patch

diff --git a/lib/arm/asm/page.h b/lib/arm/asm/page.h
index fc1b30e95567..039c9f7b3d49 100644
--- a/lib/arm/asm/page.h
+++ b/lib/arm/asm/page.h
@@ -34,16 +34,14 @@  typedef struct { pteval_t pgprot; } pgprot_t;
 #define __pgd(x)		((pgd_t) { (x) } )
 #define __pgprot(x)		((pgprot_t) { (x) } )
 
-#ifndef __virt_to_phys
-#define __phys_to_virt(x)	((unsigned long) (x))
-#define __virt_to_phys(x)	(x)
-#endif
-
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
 
+extern phys_addr_t __virt_to_phys(unsigned long addr);
+extern unsigned long __phys_to_virt(phys_addr_t addr);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASMARM_PAGE_H_ */
diff --git a/lib/arm/asm/pgtable.h b/lib/arm/asm/pgtable.h
index a95e63002ef3..b614bce9528a 100644
--- a/lib/arm/asm/pgtable.h
+++ b/lib/arm/asm/pgtable.h
@@ -14,6 +14,14 @@ 
  * This work is licensed under the terms of the GNU GPL, version 2.
  */
 
+/*
+ * We can convert va <=> pa page table addresses with simple casts
+ * because we always allocate their pages with alloc_page(), and
+ * alloc_page() always returns identity mapped pages.
+ */
+#define pgtable_va(x)		((void *)(unsigned long)(x))
+#define pgtable_pa(x)		((unsigned long)(x))
+
 #define pgd_none(pgd)		(!pgd_val(pgd))
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pte_none(pte)		(!pte_val(pte))
@@ -32,7 +40,7 @@  static inline pgd_t *pgd_alloc(void)
 
 static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
 {
-	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+	return pgtable_va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
 #define pmd_index(addr) \
@@ -52,14 +60,14 @@  static inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long addr)
 {
 	if (pgd_none(*pgd)) {
 		pmd_t *pmd = pmd_alloc_one();
-		pgd_val(*pgd) = __pa(pmd) | PMD_TYPE_TABLE;
+		pgd_val(*pgd) = pgtable_pa(pmd) | PMD_TYPE_TABLE;
 	}
 	return pmd_offset(pgd, addr);
 }
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
-	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+	return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
 #define pte_index(addr) \
@@ -79,7 +87,7 @@  static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
 {
 	if (pmd_none(*pmd)) {
 		pte_t *pte = pte_alloc_one();
-		pmd_val(*pmd) = __pa(pte) | PMD_TYPE_TABLE;
+		pmd_val(*pmd) = pgtable_pa(pte) | PMD_TYPE_TABLE;
 	}
 	return pte_offset(pmd, addr);
 }
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index b9387efe0065..9da3be38b339 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -171,3 +171,23 @@  void *setup_mmu(phys_addr_t phys_end)
 	mmu_enable(mmu_idmap);
 	return mmu_idmap;
 }
+
+phys_addr_t __virt_to_phys(unsigned long addr)
+{
+	if (mmu_enabled()) {
+		pgd_t *pgtable = current_thread_info()->pgtable;
+		return virt_to_pte_phys(pgtable, (void *)addr);
+	}
+	return addr;
+}
+
+unsigned long __phys_to_virt(phys_addr_t addr)
+{
+	/*
+	 * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr, but
+	 * the default page tables do identity map all physical addresses, which
+	 * means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
+	 */
+	assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
+	return addr;
+}
diff --git a/lib/arm64/asm/page.h b/lib/arm64/asm/page.h
index f06a6941971c..46af552b91c7 100644
--- a/lib/arm64/asm/page.h
+++ b/lib/arm64/asm/page.h
@@ -42,16 +42,14 @@  typedef struct { pgd_t pgd; } pmd_t;
 #define pmd_val(x)		(pgd_val((x).pgd))
 #define __pmd(x)		((pmd_t) { __pgd(x) } )
 
-#ifndef __virt_to_phys
-#define __phys_to_virt(x)	((unsigned long) (x))
-#define __virt_to_phys(x)	(x)
-#endif
-
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
 
+extern phys_addr_t __virt_to_phys(unsigned long addr);
+extern unsigned long __phys_to_virt(phys_addr_t addr);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASMARM64_PAGE_H_ */
diff --git a/lib/arm64/asm/pgtable.h b/lib/arm64/asm/pgtable.h
index 941a850c3f30..5860abe5b08b 100644
--- a/lib/arm64/asm/pgtable.h
+++ b/lib/arm64/asm/pgtable.h
@@ -18,6 +18,14 @@ 
 #include <asm/page.h>
 #include <asm/pgtable-hwdef.h>
 
+/*
+ * We can convert va <=> pa page table addresses with simple casts
+ * because we always allocate their pages with alloc_page(), and
+ * alloc_page() always returns identity mapped pages.
+ */
+#define pgtable_va(x)		((void *)(unsigned long)(x))
+#define pgtable_pa(x)		((unsigned long)(x))
+
 #define pgd_none(pgd)		(!pgd_val(pgd))
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pte_none(pte)		(!pte_val(pte))
@@ -40,7 +48,7 @@  static inline pgd_t *pgd_alloc(void)
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
-	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+	return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
 #define pte_index(addr) \
@@ -60,7 +68,7 @@  static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
 {
 	if (pmd_none(*pmd)) {
 		pte_t *pte = pte_alloc_one();
-		pmd_val(*pmd) = __pa(pte) | PMD_TYPE_TABLE;
+		pmd_val(*pmd) = pgtable_pa(pte) | PMD_TYPE_TABLE;
 	}
 	return pte_offset(pmd, addr);
 }
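
A closing note on the design (again an editorial sketch, not part of the
patch): the new pgtable_va()/pgtable_pa() helpers can stay plain casts
because, as the added comment says, page-table pages always come from
alloc_page(), which returns identity-mapped pages. Using __pa() there would
also now be circular, since __virt_to_phys() walks the very tables being
built. pte_alloc_one() and the headers below are assumed from the tree.

#include <libcflat.h>
#include <asm/pgtable.h>

static void pgtable_cast_example(void)
{
	/* Page-table pages are identity mapped, so the cast is exact. */
	pte_t *pte = pte_alloc_one();

	assert(pgtable_pa(pte) == (unsigned long)pte);
}

Note also the deliberately weaker guarantee asserted in __phys_to_virt():
phys_to_virt(virt_to_phys((void *)paddr)) == paddr holds because the default
tables identity map all of physical memory, while the reverse round trip
through an arbitrary virtual address is not guaranteed.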