
[v1,1/3] xen/riscv: implement software page table walking

Message ID 00dfc71569bc9971b53e29b36a80e9e020ac61ac.1737391102.git.oleksii.kurochko@gmail.com (mailing list archive)
State New
Series Fixes for vmap_to_mfn() and pt_mapping_level

Commit Message

Oleksii Kurochko Jan. 20, 2025, 4:54 p.m. UTC
RISC-V doesn't have a hardware feature to ask the MMU to translate
a virtual address to a physical address (like Arm has, for example),
so software page table walking is implemented.

Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
 xen/arch/riscv/include/asm/mm.h |  2 ++
 xen/arch/riscv/pt.c             | 56 +++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)
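
For context, a minimal, purely illustrative sketch (not part of this patch) of
how vmap_to_mfn(), which this series also touches, could be built on top of
pt_walk(), assuming only the paddr_to_pfn() helper visible above and Xen's
_mfn() wrapper:

    /* Illustrative only: translate a vmap address to its MFN via pt_walk(). */
    #define vmap_to_mfn(va) _mfn(paddr_to_pfn(pt_walk((vaddr_t)(va))))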

Patch

diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 292aa48fc1..d46018c132 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -15,6 +15,8 @@ 
 
 extern vaddr_t directmap_virt_start;
 
+paddr_t pt_walk(vaddr_t va);
+
 #define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 
diff --git a/xen/arch/riscv/pt.c b/xen/arch/riscv/pt.c
index a703e0f1bd..865d60d1af 100644
--- a/xen/arch/riscv/pt.c
+++ b/xen/arch/riscv/pt.c
@@ -274,6 +274,62 @@  static int pt_update_entry(mfn_t root, vaddr_t virt,
     return rc;
 }
 
+paddr_t pt_walk(vaddr_t va)
+{
+    const mfn_t root = get_root_page();
+    /*
+     * Only XEN_TABLE_MAP_NONE and XEN_TABLE_SUPER_PAGE can occur during a
+     * page table walk, so those are the only values pt_walk() handles;
+     * initialize `ret` with the "impossible" XEN_TABLE_MAP_NOMEM.
+     */
+    int ret = XEN_TABLE_MAP_NOMEM;
+    unsigned int level = HYP_PT_ROOT_LEVEL;
+    paddr_t pa = 0;
+    pte_t *table;
+
+    DECLARE_OFFSETS(offsets, va);
+
+    table = map_table(root);
+
+    /*
+     * Find the entry corresponding to `va` by walking down the page table
+     * levels, checking at each level whether the entry points to a next
+     * level page table or to a page.
+     *
+     * Two outcomes are possible:
+     * - ret == XEN_TABLE_SUPER_PAGE means that the entry was found;
+     *   (despite the name) XEN_TABLE_SUPER_PAGE covers 4k mappings too.
+     * - ret == XEN_TABLE_MAP_NONE means that the requested `va` isn't
+     *   actually mapped.
+     */
+    while ( (ret != XEN_TABLE_MAP_NONE) && (ret != XEN_TABLE_SUPER_PAGE) )
+    {
+        /*
+         * This case should never occur: `level` underflowing here would
+         * mean that a level 0 entry pointed to a next level page table,
+         * while a level 0 entry can only point to a 4k page.
+         */
+        ASSERT(level <= HYP_PT_ROOT_LEVEL);
+
+        ret = pt_next_level(false, &table, offsets[level]);
+        level--;
+    }
+
+    if ( ret == XEN_TABLE_MAP_NONE )
+        dprintk(XENLOG_WARNING, "Is va(%#lx) really mapped?\n", va);
+    else if ( ret == XEN_TABLE_SUPER_PAGE )
+        pa = pte_to_paddr(*(table + offsets[level + 1]));
+
+    /*
+     * There is no need for unmap_table() after each pt_next_level() call as
+     * pt_next_level() unmaps the previous table before returning the next
+     * level table, so only the last mapped table needs to be unmapped here.
+     */
+    unmap_table(table);
+
+    return pa;
+}
+
 /* Return the level where mapping should be done */
 static int pt_mapping_level(unsigned long vfn, mfn_t mfn, unsigned long nr,
                             unsigned int flags)
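
For reference, a standalone sketch (not Xen code; names like pt_index and
PT_INDEX_BITS are hypothetical) of the Sv39/Sv48 index computation behind the
offsets[] array that pt_walk() consumes: each level takes 9 bits of the
virtual address above the 12-bit page offset.

    #include <stdint.h>

    #define PAGE_SHIFT     12
    #define PT_INDEX_BITS   9
    #define PT_INDEX_MASK  ((1UL << PT_INDEX_BITS) - 1)

    /* Page table index at `level` (0 = leaf level) for virtual address `va`. */
    static inline unsigned long pt_index(unsigned int level, uint64_t va)
    {
        return (va >> (PAGE_SHIFT + level * PT_INDEX_BITS)) & PT_INDEX_MASK;
    }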