@@ -11,7 +11,7 @@
#include <linux/const.h>
/* Size of region mapped by a page global directory */
-#define PGDIR_SHIFT 22
+#define PGDIR_SHIFT (10 + PAGE_SHIFT)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
@@ -13,9 +13,9 @@
extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;
-#define PGDIR_SHIFT_L3 30
-#define PGDIR_SHIFT_L4 39
-#define PGDIR_SHIFT_L5 48
+#define PGDIR_SHIFT_L3 (9 + 9 + PAGE_SHIFT)
+#define PGDIR_SHIFT_L4 (9 + PGDIR_SHIFT_L3)
+#define PGDIR_SHIFT_L5 (9 + PGDIR_SHIFT_L4)
#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
/* Size of region mapped by a page global directory */
@@ -23,20 +23,20 @@ extern bool pgtable_l5_enabled;
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* p4d is folded into pgd in case of 4-level page table */
-#define P4D_SHIFT_L3 30
-#define P4D_SHIFT_L4 39
-#define P4D_SHIFT_L5 39
+#define P4D_SHIFT_L3 (9 + 9 + PAGE_SHIFT)
+#define P4D_SHIFT_L4 (9 + P4D_SHIFT_L3)
+#define P4D_SHIFT_L5 (9 + P4D_SHIFT_L3)
#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
(pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))
/* pud is folded into pgd in case of 3-level page table */
-#define PUD_SHIFT 30
+#define PUD_SHIFT (9 + 9 + PAGE_SHIFT)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
-#define PMD_SHIFT 21
+#define PMD_SHIFT (9 + PAGE_SHIFT)
/* Size of region mapped by a page middle directory */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
@@ -30,12 +30,27 @@
/* Number of entries in the page table */
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+#ifdef CONFIG_RISCV_USE_SW_PAGE
+
+/*
+ * PGDIR_SHIFT grows as PAGE_SIZE grows. To keep virtual addresses within
+ * their limit, pgd index bits must be cut, so we use HW_PAGE_SIZE instead.
+ */
+#define __PTRS_PER_PGD (HW_PAGE_SIZE / sizeof(pgd_t))
+#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (__PTRS_PER_PGD - 1))
+
+#define KERN_VIRT_SIZE ((__PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+
+#else
+
/*
* Half of the kernel address space (1/4 of the entries of the page global
* directory) is for the direct mapping.
*/
#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+#endif /* CONFIG_RISCV_USE_SW_PAGE */
+
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END PAGE_OFFSET
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
@@ -1304,7 +1319,11 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
* Similarly for SV57, bits 63–57 must be equal to bit 56.
*/
#ifdef CONFIG_64BIT
+#ifdef CONFIG_RISCV_USE_SW_PAGE
+#define TASK_SIZE_64 (PGDIR_SIZE * __PTRS_PER_PGD / 2)
+#else
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#endif
#define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT
This commit adjusts the SHIFT of the pte index bits at each page table
level. For example, in SV39, the traditional va is laid out as:

----------------------------------------------
| pgd index | pmd index | pte index | offset |
----------------------------------------------
| 38     30 | 29     21 | 20     12 | 11   0 |
----------------------------------------------

When we choose 64K as the basic software page, the va is laid out as:

----------------------------------------------
| pgd index | pmd index | pte index | offset |
----------------------------------------------
| 38     34 | 33     25 | 24     16 | 15   0 |
----------------------------------------------

Signed-off-by: Xu Lu <luxu.kernel@bytedance.com>
---
 arch/riscv/include/asm/pgtable-32.h |  2 +-
 arch/riscv/include/asm/pgtable-64.h | 16 ++++++++--------
 arch/riscv/include/asm/pgtable.h    | 19 +++++++++++++++++++
 3 files changed, 28 insertions(+), 9 deletions(-)
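As a sanity check of the tables above, here is a minimal standalone
userspace sketch (not kernel code): the three macros are copied from the
patch, PAGE_SHIFT = 16 assumes a 64K software page, and everything else
is illustration only.

#include <stdio.h>

/* Copied from the patch; PAGE_SHIFT = 16 assumes a 64K software page. */
#define PAGE_SHIFT	16
#define PMD_SHIFT	(9 + PAGE_SHIFT)
#define PGDIR_SHIFT_L3	(9 + 9 + PAGE_SHIFT)

int main(void)
{
	/* SV39 virtual addresses cover bits 38..0. */
	printf("| pgd index | pmd index | pte index | offset |\n");
	printf("| 38     %2d | %2d     %2d | %2d     %2d | %2d   0 |\n",
	       PGDIR_SHIFT_L3, PGDIR_SHIFT_L3 - 1, PMD_SHIFT,
	       PMD_SHIFT - 1, PAGE_SHIFT, PAGE_SHIFT - 1);
	return 0;
}

This prints the 64K row of the second table; setting PAGE_SHIFT back to
12 reproduces the traditional row, which is the point of deriving every
shift from PAGE_SHIFT rather than hard-coding it.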