
[v2,2/6] parisc: add spinlock to patch function

Message ID 20190604183435.20175-3-svens@stackframe.org
State Superseded
Series Dynamic FTRACE for PA-RISC

Commit Message

Sven Schnelle June 4, 2019, 6:34 p.m. UTC
If multiple CPUs are patching code, we need the spinlock
to protect against parallel fixmap map/unmap calls.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
---
 arch/parisc/kernel/patch.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)
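
A note on the locking pattern for readers skimming the diff: patch_map() installs
the temporary FIX_TEXT_POKE0 mapping and takes patch_lock (disabling interrupts via
raw_spin_lock_irqsave()), while patch_unmap() clears the mapping and releases the
lock, so only one CPU owns the shared fixmap slot at a time. The flags == NULL
branch, with its __acquire()/__release() sparse annotations, appears intended for
callers that already hold the lock. What follows is a loose, runnable userspace
sketch of that serialization, not kernel code: a pthread mutex stands in for
patch_lock, a shared pointer stands in for the fixmap slot, and the names
slot_lock, slot_map(), slot_unmap() and poke_words() are invented for illustration.

/* Userspace analogy of the patch_map()/patch_unmap() serialization.
 * A single shared "slot" models FIX_TEXT_POKE0; the mutex models patch_lock.
 * All identifiers here are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ patch_lock */
static uint32_t *poke_slot;   /* ~ the FIX_TEXT_POKE0 mapping */
static uint32_t text[4];      /* ~ the kernel text being patched */

/* Map the target through the shared slot; hold the lock for the whole window. */
static uint32_t *slot_map(uint32_t *addr)
{
	pthread_mutex_lock(&slot_lock);   /* ~ raw_spin_lock_irqsave()      */
	poke_slot = addr;                 /* ~ set_fixmap()                 */
	return poke_slot;
}

/* Tear down the mapping and release the slot for the next CPU/thread. */
static void slot_unmap(void)
{
	poke_slot = NULL;                 /* ~ clear_fixmap()               */
	pthread_mutex_unlock(&slot_lock); /* ~ raw_spin_unlock_irqrestore() */
}

/* Write new words through the mapping, serialized by the lock. */
static void *poke_words(void *arg)
{
	uint32_t val = (uint32_t)(uintptr_t)arg;
	uint32_t *p = slot_map(text);
	int i;

	for (i = 0; i < 4; i++)
		p[i] = val;               /* ~ the *p++ = *insn++ copy loop */
	slot_unmap();
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* Two "CPUs" patching concurrently; the lock keeps their
	 * map/write/unmap windows from interleaving on the shared slot. */
	pthread_create(&a, NULL, poke_words, (void *)(uintptr_t)0x11111111);
	pthread_create(&b, NULL, poke_words, (void *)(uintptr_t)0x22222222);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("text[0] = 0x%x\n", (unsigned)text[0]);
	return 0;
}

Build with cc -pthread. Without the mutex the two writers could interleave on the
single shared slot, which is the SMP race the spinlock in this patch guards against.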

Patch

diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
index 624de41e0524..fb503f7ff60e 100644
--- a/arch/parisc/kernel/patch.c
+++ b/arch/parisc/kernel/patch.c
@@ -23,7 +23,9 @@  struct patch {
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
-static void __kprobes *patch_map(void *addr, int fixmap, int *need_unmap)
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
+				 int *need_unmap)
+{
 	unsigned long uintaddr = (uintptr_t) addr;
 	bool module = !core_kernel_text(uintaddr);
 	struct page *page;
@@ -38,19 +40,29 @@  static void __kprobes *patch_map(void *addr, int fixmap, int *need_unmap)
 
 	*need_unmap = 1;
 	set_fixmap(fixmap, page_to_phys(page));
+	if (flags)
+		raw_spin_lock_irqsave(&patch_lock, *flags);
+	else
+		__acquire(&patch_lock);
 
 	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
 }
 
-static void __kprobes patch_unmap(int fixmap)
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
 {
 	clear_fixmap(fixmap);
+
+	if (flags)
+		raw_spin_unlock_irqrestore(&patch_lock, *flags);
+	else
+		__release(&patch_lock);
 }
 
 void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
 {
 	unsigned long start = (unsigned long)addr;
 	unsigned long end = (unsigned long)addr + len;
+	unsigned long flags;
 	u32 *p, *fixmap;
 	int mapped;
 
@@ -58,7 +70,7 @@  void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
 	flush_kernel_vmap_range(addr, len);
 	flush_icache_range(start, end);
 
-	p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &mapped);
+	p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
 
 	while (len >= 4) {
 		*p++ = *insn++;
@@ -72,14 +84,15 @@  void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
 			flush_kernel_vmap_range((void *)fixmap,
 						(p-fixmap) * sizeof(*p));
 			if (mapped)
-				patch_unmap(FIX_TEXT_POKE0);
-			p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &mapped);
+				patch_unmap(FIX_TEXT_POKE0, &flags);
+			p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
+						&mapped);
 		}
 	}
 
 	flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
 	if (mapped)
-		patch_unmap(FIX_TEXT_POKE0);
+		patch_unmap(FIX_TEXT_POKE0, &flags);
 	flush_icache_range(start, end);
 }