@@ -2142,7 +2142,8 @@ config CPU_SUPPORTS_HUGEPAGES
depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
config MIPS_PGD_C0_CONTEXT
bool
- default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
+ depends on 64BIT
+ default y if (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
#
# Set to y for ptrace access to watch registers.
@@ -848,8 +848,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
@@ -1164,8 +1164,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
+
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1292,7 +1293,6 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
return rv;
}
-
/*
* For a 64-bit kernel, we are using the 64-bit XTLB refill exception
* because EXL == 0. If we wrap, we can also use the 32 instruction
+. LOONGSON64 uses 0x98xx_xxxx_xxxx_xxxx as xkphys cached

+. let CONFIG_MIPS_PGD_C0_CONTEXT depend on 64BIT

CP0 Context has enough room for wrapping pgd into its 41-bit PTEBase field.

+. For XKPHYS, the trick is that pgd is 4kB aligned and PABITS <= 48, so
only 48 - 12 + 5 (for bit[63:59]) = 41 bits need saving, i.e.:

   bit[63:59] | 0000 0000 000 | bit[47:12] | 0000 0000 0000

+. For CKSEG0, only 29 - 12 = 17 bits need saving

Signed-off-by: Huang Pei <huangpei@loongson.cn>
---
 arch/mips/Kconfig    |  3 ++-
 arch/mips/mm/tlbex.c | 10 +++++-----
 2 files changed, 7 insertions(+), 6 deletions(-)
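For reference, here is a minimal userspace model of the wrap/unwrap
arithmetic described above. It is a sketch only: wrap_pgd(), unwrap_pgd()
and main() are made-up names for illustration, CAC_BASE is hard-coded to
the Loongson64 value, and the real sequences are uasm-generated in
build_setup_pgd() and build_get_pmde64().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAC_BASE 0x9800000000000000ULL	/* xkphys cached on Loongson64 */

/* build_setup_pgd(): store pgd << 11 in CP0 Context.  PTEBase is
 * Context[63:23], so only pgd bits [47:12] survive -- which is enough,
 * since bits [63:59] are the constant CAC_BASE segment and everything
 * else is known to be zero (4kB alignment, PABITS <= 48). */
static uint64_t wrap_pgd(uint64_t pgd_va)
{
	return pgd_va << 11;
}

/* build_get_pmde64(): what the dins/ori/drotr sequence does on refill. */
static uint64_t unwrap_pgd(uint64_t context)
{
	uint64_t ptr = context;

	ptr &= ~((1ULL << 23) - 1);		/* dins  ptr, $0, 0, 23  */
	ptr |= CAC_BASE >> 53;			/* ori   ptr, ptr, 0x4c0 */
	return (ptr >> 11) | (ptr << 53);	/* drotr ptr, ptr, 11    */
}

int main(void)
{
	uint64_t pgd = CAC_BASE | 0x12345000;	/* a 4kB-aligned pgd VA */
	uint64_t ctx = wrap_pgd(pgd) | 0x7fff8;	/* hw-filled BadVPN2    */

	assert(unwrap_pgd(ctx) == pgd);
	printf("recovered pgd: %#llx\n", (unsigned long long)unwrap_pgd(ctx));
	return 0;
}

Note that for the pre-existing CAC_BASE of 0xa800_0000_0000_0000 (xkphys
cached on other MIPS64 cores), ((u64)CAC_BASE >> 53) evaluates to the old
hard-coded 0x540, while the Loongson64 base of 0x9800_0000_0000_0000
yields 0x4c0; this is why both hunks can compute the ori immediate from
CAC_BASE instead of keeping a magic constant.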