
[v3,2/8] mm: Add missing TLB invalidate to RCU page-table freeing

Message ID 20120731104755.16662.16157.stgit@abhimanyu.in.ibm.com (mailing list archive)
State New, archived

Commit Message

Nikunj A. Dadhania July 31, 2012, 10:48 a.m. UTC
From: Peter Zijlstra <a.p.zijlstra@chello.nl>

For normal systems we need a TLB invalidate before freeing the
page-tables; the generic RCU-based page-table freeing code lacked
this.

This is because the code originally came from ppc, where the hardware
never walks the Linux page-tables and thus this invalidate is not
required.

Others, notably s390, which ran into this problem in cd94154cc6a
("[S390] fix tlb flushing for page table pages"), do very much need
this TLB invalidation.

Therefore add it, with a Kconfig option to disable it so as not to
unduly slow down PPC and SPARC64, neither of which needs it.
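
Condensed from the mm/memory.c hunk below, the generic flush path ends
up looking like this (sketch only; the comments are added here for
illustration and are not part of the patch):

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		/*
		 * TLB invalidate before the page-table pages are freed;
		 * compiled to a no-op when STRICT_TLB_FILL is set.
		 */
		tlb_table_flush_mmu(tlb);
		/* re-check: the flush above may itself have drained the batch */
		if (*batch)
			call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}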

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-z32nke0csqopykthsk1zjg8f@git.kernel.org

[Fix to check *batch is not NULL]
Signed-off-by: Nikunj A. Dadhania <nikunj@linux.vnet.ibm.com>
---
 arch/Kconfig         |    3 +++
 arch/powerpc/Kconfig |    1 +
 arch/sparc/Kconfig   |    1 +
 mm/memory.c          |   43 +++++++++++++++++++++++++++++++++++++------
 4 files changed, 42 insertions(+), 6 deletions(-)



Patch

diff --git a/arch/Kconfig b/arch/Kconfig
index 8c3d957..fec1c9b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -231,6 +231,9 @@  config HAVE_ARCH_MUTEX_CPU_RELAX
 config HAVE_RCU_TABLE_FREE
 	bool
 
+config STRICT_TLB_FILL
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9a5d3cd..fb70260 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -127,6 +127,7 @@  config PPC
 	select GENERIC_IRQ_SHOW_LEVEL
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
+	select STRICT_TLB_FILL
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_BPF_JIT if PPC64
 	select HAVE_ARCH_JUMP_LABEL
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e74ff13..126e500 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -52,6 +52,7 @@  config SPARC64
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
+	select STRICT_TLB_FILL
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_SYSCALL_WRAPPERS
diff --git a/mm/memory.c b/mm/memory.c
index 91f6945..2ef9ce1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -332,12 +332,47 @@  static void tlb_remove_table_rcu(struct rcu_head *head)
 	free_page((unsigned long)batch);
 }
 
+#ifdef CONFIG_STRICT_TLB_FILL
+/*
+ * Some architectures (sparc64, ppc) cannot refill TLBs after they've removed
+ * the PTE entries from their hash-table. Their hardware never looks at the
+ * linux page-table structures, so they don't need a hardware TLB invalidate
+ * when tearing down the page-table structure itself.
+ */
+static inline void tlb_table_flush_mmu(struct mmu_gather *tlb) { }
+
+/*
+ * When there's less than two users of this mm there cannot be
+ * a concurrent page-table walk.
+ */
+static inline bool tlb_table_fast(struct mmu_gather *tlb)
+{
+	return atomic_read(&tlb->mm->mm_users) < 2;
+}
+#else
+static inline void tlb_table_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush_mmu(tlb);
+}
+
+/*
+ * Even if there's only a single user, speculative TLB loads can
+ * wreck stuff.
+ */
+static inline bool tlb_table_fast(struct mmu_gather *tlb)
+{
+	return false;
+}
+#endif /* CONFIG_STRICT_TLB_FILL */
+
 void tlb_table_flush(struct mmu_gather *tlb)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
 	if (*batch) {
-		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		tlb_table_flush_mmu(tlb);
+		if (*batch)
+			call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
 }
@@ -348,11 +383,7 @@  void tlb_remove_table(struct mmu_gather *tlb, void *table)
 
 	tlb->need_flush = 1;
 
-	/*
-	 * When there's less then two users of this mm there cannot be a
-	 * concurrent page-table walk.
-	 */
-	if (atomic_read(&tlb->mm->mm_users) < 2) {
+	if (tlb_table_fast(tlb)) {
 		__tlb_remove_table(table);
 		return;
 	}
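
As a usage note: an architecture that uses HAVE_RCU_TABLE_FREE and whose
hardware does walk the Linux page tables (s390 being the case cited in
the changelog) simply does not select STRICT_TLB_FILL and so gets the
new invalidate; its __tlb_remove_table() hook is unchanged. A minimal,
hypothetical hook for such an architecture could look like the sketch
below (free_page() as the actual release is illustrative only; real
architectures have their own page-table freeing):

/*
 * Hypothetical per-arch hook: free one page-table page.  The generic
 * tlb_remove_table() machinery guarantees no CPU can still be walking
 * this page by the time the hook is called.
 */
static void __tlb_remove_table(void *table)
{
	free_page((unsigned long)table);
}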