diff mbox series

[v3,06/18] arm64: Do not enable uaccess for invalidate_icache_range

Message ID 20210520124406.2731873-7-tabba@google.com (mailing list archive)
State New, archived
Headers show
Series Tidy up cache.S | expand

Commit Message

Fuad Tabba May 20, 2021, 12:43 p.m. UTC
invalidate_icache_range() works on the kernel linear map, and
doesn't need uaccess. Remove the code that toggles
uaccess_ttbr0_enable, as well as the code that emits an entry
into the exception table (via the macro
invalidate_icache_by_line).

Change the return type of invalidate_icache_range() from int (which
used to indicate a fault) to void, since it doesn't need uaccess
and won't fault. Note that the return value was never checked by any
of the callers.

No functional change intended.
There may be a slight performance improvement due to the reduced
number of instructions.

Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/cacheflush.h |  2 +-
 arch/arm64/mm/cache.S               | 11 +----------
 2 files changed, 2 insertions(+), 11 deletions(-)

Comments

Mark Rutland May 20, 2021, 2:13 p.m. UTC | #1
On Thu, May 20, 2021 at 01:43:54PM +0100, Fuad Tabba wrote:
> invalidate_icache_range() works on the kernel linear map, and

Minor nit: this works on kernel addresses generally (e.g. vmalloc), so
we could say "kernel addresses" rather than "the kernel linear map".

> doesn't need uaccess. Remove the code that toggles
> uaccess_ttbr0_enable, as well as the code that emits an entry
> into the exception table (via the macro
> invalidate_icache_by_line).
> 
> Changes return type of invalidate_icache_range() from int (which
> used to indicate a fault) to void, since it doesn't need uaccess
> and won't fault. Note that return value was never checked by any
> of the callers.
> 
> No functional change intended.
> Possible performance impact due to the reduced number of
> instructions.
> 
> Reported-by: Catalin Marinas <catalin.marinas@arm.com>
> Reported-by: Will Deacon <will@kernel.org>
> Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
> Signed-off-by: Fuad Tabba <tabba@google.com>

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> ---
>  arch/arm64/include/asm/cacheflush.h |  2 +-
>  arch/arm64/mm/cache.S               | 11 +----------
>  2 files changed, 2 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
> index 52e5c1623224..a586afa84172 100644
> --- a/arch/arm64/include/asm/cacheflush.h
> +++ b/arch/arm64/include/asm/cacheflush.h
> @@ -57,7 +57,7 @@
>   *		- size   - region size
>   */
>  extern void __flush_icache_range(unsigned long start, unsigned long end);
> -extern int  invalidate_icache_range(unsigned long start, unsigned long end);
> +extern void invalidate_icache_range(unsigned long start, unsigned long end);
>  extern void __flush_dcache_area(void *addr, size_t len);
>  extern void __inval_dcache_area(void *addr, size_t len);
>  extern void __clean_dcache_area_poc(void *addr, size_t len);
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index c6bc3b8138e1..7318a40dd6ca 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -97,21 +97,12 @@ SYM_FUNC_END(__flush_cache_user_range)
>   */
>  SYM_FUNC_START(invalidate_icache_range)
>  alternative_if ARM64_HAS_CACHE_DIC
> -	mov	x0, xzr
>  	isb
>  	ret
>  alternative_else_nop_endif
>  
> -	uaccess_ttbr0_enable x2, x3, x4
> -
> -	invalidate_icache_by_line x0, x1, x2, x3, 2f
> -	mov	x0, xzr
> -1:
> -	uaccess_ttbr0_disable x1, x2
> +	invalidate_icache_by_line x0, x1, x2, x3
>  	ret
> -2:
> -	mov	x0, #-EFAULT
> -	b	1b
>  SYM_FUNC_END(invalidate_icache_range)
>  
>  /*
> -- 
> 2.31.1.751.gd2f1c929bd-goog
>
Catalin Marinas May 25, 2021, 11:18 a.m. UTC | #2
On Thu, May 20, 2021 at 01:43:54PM +0100, Fuad Tabba wrote:
> invalidate_icache_range() works on the kernel linear map, and
> doesn't need uaccess. Remove the code that toggles
> uaccess_ttbr0_enable, as well as the code that emits an entry
> into the exception table (via the macro
> invalidate_icache_by_line).
> 
> Changes return type of invalidate_icache_range() from int (which
> used to indicate a fault) to void, since it doesn't need uaccess
> and won't fault. Note that return value was never checked by any
> of the callers.
> 
> No functional change intended.
> Possible performance impact due to the reduced number of
> instructions.
> 
> Reported-by: Catalin Marinas <catalin.marinas@arm.com>
> Reported-by: Will Deacon <will@kernel.org>
> Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
> Signed-off-by: Fuad Tabba <tabba@google.com>

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 52e5c1623224..a586afa84172 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -57,7 +57,7 @@ 
  *		- size   - region size
  */
 extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int  invalidate_icache_range(unsigned long start, unsigned long end);
+extern void invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index c6bc3b8138e1..7318a40dd6ca 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -97,21 +97,12 @@  SYM_FUNC_END(__flush_cache_user_range)
  */
 SYM_FUNC_START(invalidate_icache_range)
 alternative_if ARM64_HAS_CACHE_DIC
-	mov	x0, xzr
 	isb
 	ret
 alternative_else_nop_endif
 
-	uaccess_ttbr0_enable x2, x3, x4
-
-	invalidate_icache_by_line x0, x1, x2, x3, 2f
-	mov	x0, xzr
-1:
-	uaccess_ttbr0_disable x1, x2
+	invalidate_icache_by_line x0, x1, x2, x3
 	ret
-2:
-	mov	x0, #-EFAULT
-	b	1b
 SYM_FUNC_END(invalidate_icache_range)
 
 /*