Revert "MIPS: Flush wrong invalid FTLB entry for huge page"

Message ID: 20200825043807.5741-3-huangpei@loongson.cn

Commit Message

Huang Pei Aug. 25, 2020, 4:38 a.m. UTC
This reverts commit 0115f6cbf26663c86496bc56eeea293f85b77897.

The problem fixed by 0115f6cbf26663c86496bc56eeea293f85b77897 is that the
TLB exception handlers are expected to replace a base-page mapping entry
in the FTLB with a huge-page mapping entry using tlbwi, but the hardware
does not permit this, since the FTLB can only hold base pages.
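
The sequence that has to be used instead, condensed from the
__update_tlb() change below into a rough C sketch ("entryhi" is just a
saved copy of CP0 EntryHi, and the EntryLo/PageMask setup is omitted):

	if (idx >= current_cpu_data.tlbsizevtlb) {
		/* tlb_probe() hit a base-page entry in the FTLB: mark it
		 * invalid with tlbwi instead of overwriting it ... */
		entryhi = read_c0_entryhi();
		write_c0_entryhi(MIPS_ENTRYHI_EHINV);
		tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_entryhi(entryhi);
	}
	/* ... then let tlbwr place the huge-page entry in the VTLB. */
	tlb_write_random();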

The same situation is hit first by __update_tlb(), called from
do_page_fault(), but it does NOT fix it up; the result is another TLB
Invalid exception, which gives the handlers patched by
0115f6cbf26663c86496bc56eeea293f85b77897 the chance to fix it. Let
__update_tlb() fix it directly and remove this extra TLB Invalid
exception.

Move the fix from handle_tlb[lms] to __update_tlb(), so that the huge
page fault path now takes 1 TLB Miss plus 1 TLB Invalid exception,
instead of 1 TLB Miss plus 2 TLB Invalid exceptions.

Whether the problem is handled in 0115f6cbf26663c86496bc56eeea293f85b77897
or in __update_tlb(), the root cause is that the TLB Miss handler writes
an invalid mapping entry into the TLB without checking _PAGE_PRESENT, and
this cannot be fixed unless CP0 EntryLo0 and EntryLo1 are made to map one
page instead of two, as is done for huge pages.
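
To illustrate, a rough C-level sketch of what the generated TLB refill
handler does for a base-page miss (the real code is emitted by uasm in
tlbex.c; the lookup helper here is schematic, and note that no
_PAGE_PRESENT check appears anywhere on this path):

	/* Refill: load the even/odd PTE pair covering the faulting
	 * address and write it straight into the TLB. */
	ptep = pte_offset(pmdp, badvaddr);	/* schematic page table walk */
	write_c0_entrylo0(pte_to_entrylo(pte_val(ptep[0])));
	write_c0_entrylo1(pte_to_entrylo(pte_val(ptep[1])));
	mtc0_tlbw_hazard();
	/* A not-yet-present PTE still lands in the TLB as an invalid
	 * entry, which later has to be replaced for the huge page. */
	tlb_write_random();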

Signed-off-by: Huang Pei <huangpei@loongson.cn>
---
 arch/mips/mm/tlb-r4k.c | 15 ++++++++++++++-
 arch/mips/mm/tlbex.c   | 25 ++++---------------------
 2 files changed, 18 insertions(+), 22 deletions(-)

Patch

diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 38e2894d5fa3..cb8afa326b2c 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -328,6 +328,7 @@  void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	/* this could be a huge page  */
 	if (pmd_huge(*pmdp)) {
 		unsigned long lo;
+		unsigned long entryhi;
 		write_c0_pagemask(PM_HUGE_MASK);
 		ptep = (pte_t *)pmdp;
 		lo = pte_to_entrylo(pte_val(*ptep));
@@ -335,7 +336,19 @@  void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
 
 		mtc0_tlbw_hazard();
-		if (idx < 0)
+		if (idx >= current_cpu_data.tlbsizevtlb) {
+			/* Hit in the FTLB: invalidate the entry and use tlbwr,
+			 * since the FTLB holds only base pages. */
+			entryhi = read_c0_entryhi();
+			write_c0_entryhi(MIPS_ENTRYHI_EHINV);
+			tlb_write_indexed();
+			tlbw_use_hazard();
+			write_c0_entryhi(entryhi);
+
+		}
+
+
+		if (idx < 0 || idx >= current_cpu_data.tlbsizevtlb)
 			tlb_write_random();
 		else
 			tlb_write_indexed();
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 14f8ba93367f..9c4cd08c00d3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -762,8 +762,7 @@  static void build_huge_update_entries(u32 **p, unsigned int pte,
 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 				    struct uasm_label **l,
 				    unsigned int pte,
-				    unsigned int ptr,
-				    unsigned int flush)
+				    unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 	UASM_i_SC(p, pte, 0, ptr);
@@ -772,22 +771,6 @@  static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 #else
 	UASM_i_SW(p, pte, 0, ptr);
 #endif
-	if (cpu_has_ftlb && flush) {
-		BUG_ON(!cpu_has_tlbinv);
-
-		UASM_i_MFC0(p, ptr, C0_ENTRYHI);
-		uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
-		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
-		build_tlb_write_entry(p, l, r, tlb_indexed);
-
-		uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
-		UASM_i_MTC0(p, ptr, C0_ENTRYHI);
-		build_huge_update_entries(p, pte, ptr);
-		build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
-
-		return;
-	}
-
 	build_huge_update_entries(p, pte, ptr);
 	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
@@ -2278,7 +2261,7 @@  static void build_r4000_tlb_load_handler(void)
 		uasm_l_tlbl_goaround2(&l, p);
 	}
 	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbl(&l, p);
@@ -2334,7 +2317,7 @@  static void build_r4000_tlb_store_handler(void)
 	build_tlb_probe_entry(&p);
 	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbs(&l, p);
@@ -2391,7 +2374,7 @@  static void build_r4000_tlb_modify_handler(void)
 	build_tlb_probe_entry(&p);
 	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbm(&l, p);