
[v2,1/4] powerpc/code-patching: introduce patch_instructions()

Message ID 20230309180028.180200-2-hbathini@linux.ibm.com (mailing list archive)
State Superseded
Series enable bpf_prog_pack allocator for powerpc


Commit Message

Hari Bathini March 9, 2023, 6 p.m. UTC
patch_instruction() entails setting up pte, patching the instruction,
clearing the pte and flushing the tlb. If multiple instructions need
to be patched, every instruction would have to go through the above
drill unnecessarily. Instead, introduce function patch_instructions()
that patches multiple instructions at one go while setting up the pte,
clearing the pte and flushing the tlb only once per page range of
instructions. Observed ~5X improvement in speed of execution using
patch_instructions() over patch_instruction(), when more instructions
are to be patched.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/include/asm/code-patching.h |   1 +
 arch/powerpc/lib/code-patching.c         | 151 ++++++++++++++++-------
 2 files changed, 106 insertions(+), 46 deletions(-)
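
As a reading aid, a minimal sketch of the call-pattern change the commit
message describes, assuming a hypothetical caller with a destination dst
and a buffer insns of num_insns 32-bit instruction words (none of these
names appear in the patch):

	int i, err;

	/* Before: every instruction pays the full pte setup, patch,
	 * pte clear and tlb flush cost inside patch_instruction().
	 */
	for (i = 0; i < num_insns; i++)
		err = patch_instruction(dst + i, ppc_inst(insns[i]));

	/* After: one call; the temporary mapping is set up and torn
	 * down only once per page of the target range.
	 */
	err = patch_instructions(dst, insns, false, num_insns * sizeof(u32));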

Patch

diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 3f881548fb61..059fc4fe700e 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -74,6 +74,7 @@  int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
 int patch_branch(u32 *addr, unsigned long target, int flags);
 int patch_instruction(u32 *addr, ppc_inst_t instr);
 int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
+int patch_instructions(u32 *addr, u32 *code, bool fill_inst, size_t len);
 
 static inline unsigned long patch_site_addr(s32 *site)
 {
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index b00112d7ad46..33857b9b53de 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -278,77 +278,117 @@  static void unmap_patch_area(unsigned long addr)
 	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 }
 
-static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
+static int __do_patch_instructions_mm(u32 *addr, u32 *code, bool fill_inst, size_t len)
 {
-	int err;
-	u32 *patch_addr;
-	unsigned long text_poke_addr;
-	pte_t *pte;
-	unsigned long pfn = get_patch_pfn(addr);
-	struct mm_struct *patching_mm;
-	struct mm_struct *orig_mm;
+	struct mm_struct *patching_mm, *orig_mm;
+	unsigned long text_poke_addr, pfn;
+	u32 *patch_addr, *end, *pend;
+	ppc_inst_t instr;
 	spinlock_t *ptl;
+	int ilen, err;
+	pte_t *pte;
 
 	patching_mm = __this_cpu_read(cpu_patching_context.mm);
 	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
-	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
 
 	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
 	if (!pte)
 		return -ENOMEM;
 
-	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+	end = (void *)addr + len;
+	do {
+		pfn = get_patch_pfn(addr);
+		__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
 
-	/* order PTE update before use, also serves as the hwsync */
-	asm volatile("ptesync": : :"memory");
-
-	/* order context switch after arbitrary prior code */
-	isync();
-
-	orig_mm = start_using_temp_mm(patching_mm);
-
-	err = __patch_instruction(addr, instr, patch_addr);
+		/* order PTE update before use, also serves as the hwsync */
+		asm volatile("ptesync": : :"memory");
 
-	/* hwsync performed by __patch_instruction (sync) if successful */
-	if (err)
-		mb();  /* sync */
+		/* order context switch after arbitrary prior code */
+		isync();
+
+		orig_mm = start_using_temp_mm(patching_mm);
+
+		patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+		pend = (void *)addr + PAGE_SIZE - offset_in_page(addr);
+		if (end < pend)
+			pend = end;
+
+		while (addr < pend) {
+			instr = ppc_inst_read(code);
+			ilen = ppc_inst_len(instr);
+			err = __patch_instruction(addr, instr, patch_addr);
+			/* hwsync performed by __patch_instruction (sync) if successful */
+			if (err) {
+				mb();  /* sync */
+				break;
+			}
+
+			patch_addr = (void *)patch_addr + ilen;
+			addr = (void *)addr + ilen;
+			if (!fill_inst)
+				code = (void *)code + ilen;
+		}
 
-	/* context synchronisation performed by __patch_instruction (isync or exception) */
-	stop_using_temp_mm(patching_mm, orig_mm);
+		/* context synchronisation performed by __patch_instruction (isync or exception) */
+		stop_using_temp_mm(patching_mm, orig_mm);
 
-	pte_clear(patching_mm, text_poke_addr, pte);
-	/*
-	 * ptesync to order PTE update before TLB invalidation done
-	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
-	 */
-	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+		pte_clear(patching_mm, text_poke_addr, pte);
+		/*
+		 * ptesync to order PTE update before TLB invalidation done
+		 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
+		 */
+		local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+		if (err)
+			break;
+	} while (addr < end);
 
 	pte_unmap_unlock(pte, ptl);
 
 	return err;
 }
 
-static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
+static int __do_patch_instructions(u32 *addr, u32 *code, bool fill_inst, size_t len)
 {
-	int err;
-	u32 *patch_addr;
-	unsigned long text_poke_addr;
+	unsigned long text_poke_addr, pfn;
+	u32 *patch_addr, *end, *pend;
+	ppc_inst_t instr;
+	int ilen, err;
 	pte_t *pte;
-	unsigned long pfn = get_patch_pfn(addr);
 
 	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
-	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
-
 	pte = __this_cpu_read(cpu_patching_context.pte);
-	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
-	/* See ptesync comment in radix__set_pte_at() */
-	if (radix_enabled())
-		asm volatile("ptesync": : :"memory");
 
-	err = __patch_instruction(addr, instr, patch_addr);
+	end = (void *)addr + len;
+	do {
+		pfn = get_patch_pfn(addr);
+		__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+		/* See ptesync comment in radix__set_pte_at() */
+		if (radix_enabled())
+			asm volatile("ptesync": : :"memory");
+
+		patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+		pend = (void *)addr + PAGE_SIZE - offset_in_page(addr);
+		if (end < pend)
+			pend = end;
+
+		while (addr < pend) {
+			instr = ppc_inst_read(code);
+			ilen = ppc_inst_len(instr);
+			err = __patch_instruction(addr, instr, patch_addr);
+			if (err)
+				break;
+
+			patch_addr = (void *)patch_addr + ilen;
+			addr = (void *)addr + ilen;
+			if (!fill_inst)
+				code = (void *)code + ilen;
+		}
 
-	pte_clear(&init_mm, text_poke_addr, pte);
-	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+		pte_clear(&init_mm, text_poke_addr, pte);
+		flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+		if (err)
+			break;
+	} while (addr < end);
 
 	return err;
 }
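
Both helpers above share the same outer shape; only the mapping
machinery differs (temporary mm plus local flush in
__do_patch_instructions_mm(), the init_mm pte plus
flush_tlb_kernel_range() in __do_patch_instructions()). The pend clamp
is what bounds each pass to a single page; a standalone illustration of
that arithmetic with assumed values (PAGE_SIZE 0x1000, addr 0x2ff0,
len 0x40):

	u32 *end  = (void *)addr + len;		/* end  = 0x3030 */
	u32 *pend = (void *)addr + PAGE_SIZE - offset_in_page(addr);
	if (end < pend)
		pend = end;
	/* pass 1: pend = 0x3000, page end wins    -> patch 0x2ff0..0x2fff
	 * pass 2: pend = 0x3030, request end wins -> patch 0x3000..0x302f
	 */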
@@ -369,15 +409,34 @@  int patch_instruction(u32 *addr, ppc_inst_t instr)
 
 	local_irq_save(flags);
 	if (mm_patch_enabled())
-		err = __do_patch_instruction_mm(addr, instr);
+		err = __do_patch_instructions_mm(addr, (u32 *)&instr, false, ppc_inst_len(instr));
 	else
-		err = __do_patch_instruction(addr, instr);
+		err = __do_patch_instructions(addr, (u32 *)&instr, false, ppc_inst_len(instr));
 	local_irq_restore(flags);
 
 	return err;
 }
 NOKPROBE_SYMBOL(patch_instruction);
 
+/*
+ * Patch 'addr' with 'len' bytes of instructions from 'code'.
+ */
+int patch_instructions(u32 *addr, u32 *code, bool fill_inst, size_t len)
+{
+	unsigned long flags;
+	int err;
+
+	local_irq_save(flags);
+	if (mm_patch_enabled())
+		err = __do_patch_instructions_mm(addr, code, fill_inst, len);
+	else
+		err = __do_patch_instructions(addr, code, fill_inst, len);
+	local_irq_restore(flags);
+
+	return err;
+}
+NOKPROBE_SYMBOL(patch_instructions);
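
The series title names bpf_prog_pack as the intended user. Purely as a
hedged illustration (this function is not part of this patch), a later
patch could wire the new export into the generic bpf text-copy hook
roughly like:

	void *bpf_arch_text_copy(void *dst, void *src, size_t len)
	{
		int err;

		if (!dst || !src)
			return ERR_PTR(-EINVAL);

		err = patch_instructions(dst, src, false, len);

		return err ? ERR_PTR(err) : dst;
	}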
+
 int patch_branch(u32 *addr, unsigned long target, int flags)
 {
 	ppc_inst_t instr;