@@ -41,8 +41,10 @@
* Start addr | End addr | Slot | area description
* ============================================================================
* ..... L2 511 Unused
- * 0xffffffffc0600000 0xffffffffc0800000 L2 511 Fixmap
- * 0xffffffffc0200000 0xffffffffc0600000 L2 511 FDT
+ * 0xffffffffc0a00000 0xffffffffc0c00000 L2 511 Fixmap
+ * ..... ( 2 MB gap )
+ * 0xffffffffc0400000 0xffffffffc0800000 L2 511 FDT
+ * ..... ( 2 MB gap )
* 0xffffffffc0000000 0xffffffffc0200000 L2 511 Xen
* ..... L2 510 Unused
* 0x3200000000 0x7f40000000 L2 200-509 Direct map
@@ -74,6 +76,16 @@
#error "unsupported RV_STAGE1_MODE"
#endif
+#define GAP_SIZE MB(2)
+
+#define XEN_VIRT_SIZE MB(2)
+
+#define BOOT_FDT_VIRT_START (XEN_VIRT_START + XEN_VIRT_SIZE + GAP_SIZE)
+#define BOOT_FDT_VIRT_SIZE MB(4)
+
+#define FIXMAP_BASE \
+ (BOOT_FDT_VIRT_START + BOOT_FDT_VIRT_SIZE + GAP_SIZE)
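+
+/*
+ * With Xen linked at 0xffffffffc0000000 (L2 slot 511, see the layout above),
+ * this places the FDT at 0xffffffffc0400000 and the fixmap at
+ * 0xffffffffc0a00000, each separated from the previous region by a 2 MB gap.
+ */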
+
#define DIRECTMAP_SLOT_END 509
#define DIRECTMAP_SLOT_START 200
#define DIRECTMAP_VIRT_START SLOTN(DIRECTMAP_SLOT_START)
new file mode 100644
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ */
+#ifndef ASM_FIXMAP_H
+#define ASM_FIXMAP_H
+
+#include <xen/bug.h>
+#include <xen/page-size.h>
+#include <xen/pmap.h>
+
+#include <asm/page.h>
+
+#define FIXMAP_ADDR(n) (FIXMAP_BASE + (n) * PAGE_SIZE)
+
+/* Fixmap slots */
+#define FIX_PMAP_BEGIN (0) /* Start of PMAP */
+#define FIX_PMAP_END (FIX_PMAP_BEGIN + NUM_FIX_PMAP - 1) /* End of PMAP */
+#define FIX_MISC (FIX_PMAP_END + 1) /* Ephemeral mappings of hardware */
+
+#define FIX_LAST FIX_MISC
+
+#define FIXADDR_START FIXMAP_ADDR(0)
+#define FIXADDR_TOP FIXMAP_ADDR(FIX_LAST + 1)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Direct access to xen_fixmap[] should only happen when {set,
+ * clear}_fixmap() is unusable (e.g. where we would otherwise end up
+ * recursively calling the helpers).
+ */
+extern pte_t xen_fixmap[];
+
+#define fix_to_virt(slot) ((void *)FIXMAP_ADDR(slot))
+
+static inline unsigned int virt_to_fix(vaddr_t vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+
+ return ((vaddr - FIXADDR_START) >> PAGE_SHIFT);
+}
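+
+/*
+ * Typical usage (a sketch, assuming the usual Xen set_fixmap()/clear_fixmap()
+ * helpers are provided elsewhere):
+ *
+ *   set_fixmap(FIX_MISC, mfn, flags);
+ *   ... access the frame through fix_to_virt(FIX_MISC) ...
+ *   clear_fixmap(FIX_MISC);
+ */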
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ASM_FIXMAP_H */
@@ -255,4 +255,6 @@ static inline unsigned int arch_get_dma_bitsize(void)
return 32; /* TODO */
}
+void setup_fixmap_mappings(void);
+
#endif /* _ASM_RISCV_MM_H */
@@ -9,6 +9,7 @@
#include <xen/bug.h>
#include <xen/types.h>
+#include <asm/atomic.h>
#include <asm/mm.h>
#include <asm/page-bits.h>
@@ -81,6 +82,18 @@ static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache)
BUG_ON("unimplemented");
}
+/* Write a pagetable entry. */
+static inline void write_pte(pte_t *p, pte_t pte)
+{
+ write_atomic(p, pte);
+}
+
+/* Read a pagetable entry. */
+static inline pte_t read_pte(const pte_t *p)
+{
+ return read_atomic(p);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PAGE_H */
@@ -12,6 +12,7 @@
#include <asm/early_printk.h>
#include <asm/csr.h>
#include <asm/current.h>
+#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/processor.h>
@@ -49,6 +50,9 @@ stage1_pgtbl_root[PAGETABLE_ENTRIES];
pte_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
stage1_pgtbl_nonroot[PGTBL_INITIAL_COUNT * PAGETABLE_ENTRIES];
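+
+/*
+ * L0 page table backing the fixmap region; entries are filled on demand by
+ * set_fixmap().
+ */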
+pte_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
+xen_fixmap[PAGETABLE_ENTRIES];
+
#define HANDLE_PGTBL(curr_lvl_num) \
index = pt_index(curr_lvl_num, page_addr); \
if ( pte_is_valid(pgtbl[index]) ) \
@@ -191,6 +195,45 @@ static bool __init check_pgtbl_mode_support(struct mmu_desc *mmu_desc,
return is_mode_supported;
}
+void __init setup_fixmap_mappings(void)
+{
+ pte_t *pte, tmp;
+ unsigned int i;
+
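+ /* The whole fixmap must fit in the single L0 table backing it. */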
+ BUILD_BUG_ON(FIX_LAST >= PAGETABLE_ENTRIES);
+
+ pte = &stage1_pgtbl_root[pt_index(HYP_PT_ROOT_LEVEL, FIXMAP_ADDR(0))];
+
+ /*
+ * In RISC-V, page table levels are numbered from Lx down to L0, where x is
+ * the highest level for the current MMU mode (e.g. Sv39 has three levels,
+ * so x = 2: L2 -> L1 -> L0).
+ *
+ * This loop walks down to the L1 page table, because xen_fixmap[] will be
+ * used as the L0 page table.
+ */
+ for ( i = HYP_PT_ROOT_LEVEL; i-- > 1; )
+ {
+ BUG_ON(!pte_is_valid(*pte));
+
+ pte = (pte_t *)LOAD_TO_LINK(pte_to_paddr(*pte));
+ pte = &pte[pt_index(i, FIXMAP_ADDR(0))];
+ }
+
+ BUG_ON(pte_is_valid(*pte));
+
+ tmp = paddr_to_pte(LINK_TO_LOAD((unsigned long)&xen_fixmap), PTE_TABLE);
+ write_pte(pte, tmp);
+
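+ /*
+ * Make the new table entry visible before any access through the fixmap
+ * addresses, and flush stale translations from the TLB.
+ */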
+ RISCV_FENCE(rw, rw);
+ sfence_vma();
+
+ /*
+ * We only need the L0 table (xen_fixmap[]) allocated, but not the PTEs set,
+ * because set_fixmap() will set them on the fly.
+ */
+}
+
/*
* setup_initial_pagetables:
*
@@ -47,6 +47,8 @@ void __init noreturn start_xen(unsigned long bootcpu_id,
test_macros_from_bug_h();
#endif
+ setup_fixmap_mappings();
+
printk("All set up\n");
machine_halt();
@@ -174,6 +174,6 @@ ASSERT(!SIZEOF(.got.plt), ".got.plt non-empty")
* Changing the size of Xen binary can require an update of
* PGTBL_INITIAL_COUNT.
*/
-ASSERT(_end - _start <= MB(2), "Xen too large for early-boot assumptions")
+ASSERT(_end - _start <= XEN_VIRT_SIZE, "Xen too large for early-boot assumptions")
ASSERT(_ident_end - _ident_start <= IDENT_AREA_SIZE, "identity region is too big");