
[v4,04/18] arm64: Do not enable uaccess for flush_icache_range

Message ID 20210524083001.2586635-5-tabba@google.com (mailing list archive)
State New, archived
Series Tidy up cache.S

Commit Message

Fuad Tabba May 24, 2021, 8:29 a.m. UTC
__flush_icache_range works on kernel addresses, and doesn't need
uaccess. The existing uaccess handling is a side effect of its
current implementation, which falls through into
__flush_cache_user_range.
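
For reference, a condensed sketch of the pre-patch layout,
reconstructed from the removed lines in the diff below; the
kernel-address entry point has no return of its own and falls
into the user variant, inheriting its uaccess window:

	SYM_FUNC_START(__flush_icache_range)
		/* FALLTHROUGH: no ret here, execution continues below */
	SYM_FUNC_START(__flush_cache_user_range)
		uaccess_ttbr0_enable x2, x3, x4	// enabled even for kernel VAs
		...				// shared D/I-cache maintenance
	SYM_FUNC_END(__flush_icache_range)
	SYM_FUNC_END(__flush_cache_user_range)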

Instead of sharing the code via fallthrough, use a common macro
for the two, where the caller specifies an optional fixup label
if user access is needed. If provided, this label is used to
generate an extable entry.
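
As an illustration (not part of this patch), the optional-argument
pattern relies on .ifnc to emit an extable entry only when a fixup
label is actually supplied; the helper below follows the shape of
the _cond_extable macro added earlier in this series, quoted here
from memory:

	.macro	_cond_extable, insn, fixup
	.ifnc	\fixup,				// only if \fixup is non-empty
	_asm_extable	\insn, \fixup		// a fault at \insn branches to \fixup
	.endif
	.endm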

Simplify the code to use dcache_by_line_op, instead of
replicating much of its functionality.
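
As a rough sketch, a dcache_by_line_op cvau, ish, x2, x3, x4, x5
invocation expands to a loop along these lines (simplified; the
ARM64_WORKAROUND_CLEAN_CACHE alternative is omitted and register
use is illustrative):

	dcache_line_size x4, x5		// x4 = smallest D-cache line size
	add	x3, x2, x3		// x3 = end of region (start + size)
	sub	x5, x4, #1
	bic	x2, x2, x5		// align start down to line size
	1:
	dc	cvau, x2		// clean D-cache line by VA to PoU
	add	x2, x2, x4		// advance to next line
	cmp	x2, x3
	b.lo	1b
	dsb	ish			// complete maintenance before the I-side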

No functional change intended.
Any performance impact should be a slight improvement, due to
the reduced number of instructions.

Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Will Deacon <will@kernel.org>
Reported-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
Link: https://lore.kernel.org/linux-arm-kernel/20210521121846.GB1040@C02TD0UTHF1T.local/
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/mm/cache.S | 57 ++++++++++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 23 deletions(-)

Comments

Catalin Marinas May 25, 2021, 11:20 a.m. UTC | #1
On Mon, May 24, 2021 at 09:29:47AM +0100, Fuad Tabba wrote:
> __flush_icache_range works on kernel addresses, and doesn't need
> uaccess. The existing uaccess handling is a side effect of its
> current implementation, which falls through into
> __flush_cache_user_range.
> 
> Instead of sharing the code via fallthrough, use a common macro
> for the two, where the caller specifies an optional fixup label
> if user access is needed. If provided, this label is used to
> generate an extable entry.
> 
> Simplify the code to use dcache_by_line_op, instead of
> replicating much of its functionality.
> 
> No functional change intended.
> Any performance impact should be a slight improvement, due to
> the reduced number of instructions.
> 
> Reported-by: Catalin Marinas <catalin.marinas@arm.com>
> Reported-by: Will Deacon <will@kernel.org>
> Reported-by: Mark Rutland <mark.rutland@arm.com>
> Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
> Link: https://lore.kernel.org/linux-arm-kernel/20210521121846.GB1040@C02TD0UTHF1T.local/
> Signed-off-by: Fuad Tabba <tabba@google.com>

Wrong version, acking again here:

Acked-by: Catalin Marinas <catalin.marinas@arm.com>

Patch

diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2d881f34dd9d..7c54bcbf5a36 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -14,6 +14,34 @@ 
 #include <asm/alternative.h>
 #include <asm/asm-uaccess.h>
 
+/*
+ *	__flush_cache_range(start,end) [fixup]
+ *
+ *	Ensure that the I and D caches are coherent within specified region.
+ *	This is typically used when code has been written to a memory region,
+ *	and will be executed.
+ *
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
+ *	- fixup   - optional label to branch to on user fault
+ */
+.macro	__flush_cache_range, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+	dsb     ishst
+	b       .Ldc_skip_\@
+alternative_else_nop_endif
+	mov     x2, x0
+	sub     x3, x1, x0
+	dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+	isb
+	b	.Lic_skip_\@
+alternative_else_nop_endif
+	invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
+
 /*
  *	flush_icache_range(start,end)
  *
@@ -25,7 +53,9 @@ 
  *	- end     - virtual end address of region
  */
 SYM_FUNC_START(__flush_icache_range)
-	/* FALLTHROUGH */
+	__flush_cache_range
+	ret
+SYM_FUNC_END(__flush_icache_range)
 
 /*
  *	__flush_cache_user_range(start,end)
@@ -39,34 +69,15 @@  SYM_FUNC_START(__flush_icache_range)
  */
 SYM_FUNC_START(__flush_cache_user_range)
 	uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
-	dsb	ishst
-	b	7f
-alternative_else_nop_endif
-	dcache_line_size x2, x3
-	sub	x3, x2, #1
-	bic	x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
-	add	x4, x4, x2
-	cmp	x4, x1
-	b.lo	1b
-	dsb	ish
 
-7:
-alternative_if ARM64_HAS_CACHE_DIC
-	isb
-	b	8f
-alternative_else_nop_endif
-	invalidate_icache_by_line x0, x1, x2, x3, 9f
-8:	mov	x0, #0
+	__flush_cache_range 2f
+	mov	x0, xzr
 1:
 	uaccess_ttbr0_disable x1, x2
 	ret
-9:
+2:
 	mov	x0, #-EFAULT
 	b	1b
-SYM_FUNC_END(__flush_icache_range)
 SYM_FUNC_END(__flush_cache_user_range)
 
 /*