
[v2,2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions

Message ID: 20191122022406.590141-3-pasha.tatashin@soleen.com (mailing list archive)
State: Superseded
Series: Use C inlines for uaccess

Commit Message

Pasha Tatashin Nov. 22, 2019, 2:24 a.m. UTC
Replace the uaccess_ttbr0_disable/uaccess_ttbr0_enable asm macros
with their C inline variants, and remove the asm macros.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 22 ----------------
 arch/arm64/include/asm/cacheflush.h  | 38 +++++++++++++++++++++++++---
 arch/arm64/mm/cache.S                | 30 ++++++++--------------
 arch/arm64/mm/flush.c                |  2 +-
 4 files changed, 46 insertions(+), 46 deletions(-)

Comments

Mark Rutland Nov. 27, 2019, 3:01 p.m. UTC | #1
Hi Pavel,

On Thu, Nov 21, 2019 at 09:24:05PM -0500, Pavel Tatashin wrote:
> Replace the uaccess_ttbr0_disable/uaccess_ttbr0_enable asm macros
> with their C inline variants, and remove the asm macros.

A commit message should provide rationale, rather than just a
description of the patch. Something like:

| We currently duplicate the logic to enable/disable uaccess via TTBR0,
| with C functions and assembly macros. This is a maintenance burden
| and is liable to lead to subtle bugs, so let's get rid of the assembly
| macros, and always use the C functions. This requires refactoring
| some assembly functions to have a C wrapper.

[...]

> +static inline int invalidate_icache_range(unsigned long start,
> +					  unsigned long end)
> +{
> +	int rv;
> +#if ARM64_HAS_CACHE_DIC
> +	rv = arch_invalidate_icache_range(start, end);
> +#else
> +	uaccess_ttbr0_enable();
> +	rv = arch_invalidate_icache_range(start, end);
> +	uaccess_ttbr0_disable();
> +#endif
> +	return rv;
> +}

This ifdeffery is not the same as an alternative_if, and even if it
were, the ARM64_HAS_CACHE_DIC behaviour is not the same as the existing
assembly.

This should be:

static inline int invalidate_icache_range(unsigned long start,
					  unsigned long end)
{
	int ret;

	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
		isb();
		return 0;
	}
	
	uaccess_ttbr0_enable();
	ret = arch_invalidate_icache_range(start, end);
	uaccess_ttbr0_disable();

	return ret;
}
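
For background on why the #if version misbehaves, a short sketch (the
value below is made up; the point is only that cpucaps are plain
integer indices, not config symbols):

/* <asm/cpucaps.h> defines each capability as an array index, e.g.: */
#define ARM64_HAS_CACHE_DIC	28	/* illustrative value only */

/*
 * "#if ARM64_HAS_CACHE_DIC" therefore tests a non-zero constant and
 * always selects the DIC branch at build time, on every system,
 * whereas cpus_have_const_cap(ARM64_HAS_CACHE_DIC) is resolved from
 * the CPU features actually detected at boot.
 */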

The 'arch_' prefix should probably be 'asm_' (or have an '_asm' suffix),
since this is entirely local to the arch code, and even then should only
be called from the C wrappers.
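
For illustration, the resulting split might look roughly like the
sketch below (the asm_* name is only a suggestion standing in for the
renamed assembly routine):

/* Raw routine implemented in arch/arm64/mm/cache.S; not called directly. */
extern int asm_invalidate_icache_range(unsigned long start,
				       unsigned long end);

/* C wrapper: the only caller of the assembly routine. */
static inline int invalidate_icache_range(unsigned long start,
					  unsigned long end)
{
	int ret;

	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
		isb();
		return 0;
	}

	uaccess_ttbr0_enable();
	ret = asm_invalidate_icache_range(start, end);
	uaccess_ttbr0_disable();

	return ret;
}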

Thanks,
Mark.
Pasha Tatashin Nov. 27, 2019, 3:10 p.m. UTC | #2
Hi Mark,

Thank you for reviewing this work.

> A commit message should provide rationale, rather than just a
> description of the patch. Something like:
>
> | We currently duplicate the logic to enable/disable uaccess via TTBR0,
> | with C functions and assembly macros. This is a maintenance burden
> | and is liable to lead to subtle bugs, so let's get rid of the assembly
> | macros, and always use the C functions. This requires refactoring
> | some assembly functions to have a C wrapper.

Thank you for the suggestion; I will fix my commit log.
>
> [...]
>
> > +static inline int invalidate_icache_range(unsigned long start,
> > +                                       unsigned long end)
> > +{
> > +     int rv;
> > +#if ARM64_HAS_CACHE_DIC
> > +     rv = arch_invalidate_icache_range(start, end);
> > +#else
> > +     uaccess_ttbr0_enable();
> > +     rv = arch_invalidate_icache_range(start, end);
> > +     uaccess_ttbr0_disable();
> > +#endif
> > +     return rv;
> > +}
>
> This ifdeffery is not the same as an alternative_if, and even if it
> were, the ARM64_HAS_CACHE_DIC behaviour is not the same as the existing
> assembly.
>
> This should be:
>
> static inline int invalidate_icache_range(unsigned long start,
>                                           unsigned long end)
> {
>         int ret;
>
>         if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
>                 isb();
>                 return 0;
>         }
>
>         uaccess_ttbr0_enable();
>         ret = arch_invalidate_icache_range(start, end);
>         uaccess_ttbr0_disable();
>
>         return ret;
> }

I will fix it, thanks.

>
> The 'arch_' prefix should probably be 'asm_' (or have an '_asm' suffix),
> since this is entirely local to the arch code, and even then should only
> be called from the C wrappers.

Sure, I can change it to asm_*; I was using arch_* to be consistent
with __arch_copy_from_user() and friends.

Thank you,
Pasha
Mark Rutland Nov. 27, 2019, 3:14 p.m. UTC | #3
On Wed, Nov 27, 2019 at 10:10:07AM -0500, Pavel Tatashin wrote:
> Hi Mark,
> 
> Thank you for reviewing this work.
 
> > The 'arch_' prefix should probably be 'asm_' (or have an '_asm' suffix),
> > since this is entirely local to the arch code, and even then should only
> > be called from the C wrappers.
> 
> Sure, I can change it to asm_*; I was using arch_* to be consistent
> with __arch_copy_from_user() and friends.

FWIW, that naming was from before the common uaccess code took on the
raw_* naming for the arch functions, and I was expecting that the arch_*
functions would end up being called from core code.

For now it's probably too churny to change that existing case.

Thanks,
Mark.

Patch

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 35e6145e1402..8f763e5b41b1 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -34,27 +34,5 @@ 
 	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
 	isb
 	.endm
-
-	.macro	uaccess_ttbr0_disable, tmp1, tmp2
-alternative_if_not ARM64_HAS_PAN
-	save_and_disable_irq \tmp2		// avoid preemption
-	__uaccess_ttbr0_disable \tmp1
-	restore_irq \tmp2
-alternative_else_nop_endif
-	.endm
-
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
-alternative_if_not ARM64_HAS_PAN
-	save_and_disable_irq \tmp3		// avoid preemption
-	__uaccess_ttbr0_enable \tmp1, \tmp2
-	restore_irq \tmp3
-alternative_else_nop_endif
-	.endm
-#else
-	.macro	uaccess_ttbr0_disable, tmp1, tmp2
-	.endm
-
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
-	.endm
 #endif
 #endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..cdd4a8eb8708 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -61,16 +61,48 @@ 
  *		- kaddr  - page address
  *		- size   - region size
  */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int  invalidate_icache_range(unsigned long start, unsigned long end);
+extern void __arch_flush_icache_range(unsigned long start, unsigned long end);
+extern long __arch_flush_cache_user_range(unsigned long start,
+					  unsigned long end);
+extern int  arch_invalidate_icache_range(unsigned long start,
+					 unsigned long end);
+
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pop(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
 
+static inline void __flush_icache_range(unsigned long start, unsigned long end)
+{
+	uaccess_ttbr0_enable();
+	__arch_flush_icache_range(start, end);
+	uaccess_ttbr0_disable();
+}
+
+static inline void __flush_cache_user_range(unsigned long start,
+					    unsigned long end)
+{
+	uaccess_ttbr0_enable();
+	__arch_flush_cache_user_range(start, end);
+	uaccess_ttbr0_disable();
+}
+
+static inline int invalidate_icache_range(unsigned long start,
+					  unsigned long end)
+{
+	int rv;
+#if ARM64_HAS_CACHE_DIC
+	rv = arch_invalidate_icache_range(start, end);
+#else
+	uaccess_ttbr0_enable();
+	rv = arch_invalidate_icache_range(start, end);
+	uaccess_ttbr0_disable();
+#endif
+	return rv;
+}
+
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 	__flush_icache_range(start, end);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..408d317a47d2 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@ 
 #include <asm/asm-uaccess.h>
 
 /*
- *	flush_icache_range(start,end)
+ *	__arch_flush_icache_range(start,end)
  *
  *	Ensure that the I and D caches are coherent within specified region.
  *	This is typically used when code has been written to a memory region,
@@ -24,11 +24,11 @@ 
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(__flush_icache_range)
+ENTRY(__arch_flush_icache_range)
 	/* FALLTHROUGH */
 
 /*
- *	__flush_cache_user_range(start,end)
+ *	__arch_flush_cache_user_range(start,end)
  *
  *	Ensure that the I and D caches are coherent within specified region.
  *	This is typically used when code has been written to a memory region,
@@ -37,8 +37,7 @@  ENTRY(__flush_icache_range)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(__flush_cache_user_range)
-	uaccess_ttbr0_enable x2, x3, x4
+ENTRY(__arch_flush_cache_user_range)
 alternative_if ARM64_HAS_CACHE_IDC
 	dsb	ishst
 	b	7f
@@ -60,14 +59,11 @@  alternative_if ARM64_HAS_CACHE_DIC
 alternative_else_nop_endif
 	invalidate_icache_by_line x0, x1, x2, x3, 9f
 8:	mov	x0, #0
-1:
-	uaccess_ttbr0_disable x1, x2
-	ret
-9:
-	mov	x0, #-EFAULT
+1:	ret
+9:	mov	x0, #-EFAULT
 	b	1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+ENDPROC(__arch_flush_icache_range)
+ENDPROC(__arch_flush_cache_user_range)
 
 /*
  *	invalidate_icache_range(start,end)
@@ -83,16 +79,10 @@  alternative_if ARM64_HAS_CACHE_DIC
 	isb
 	ret
 alternative_else_nop_endif
-
-	uaccess_ttbr0_enable x2, x3, x4
-
 	invalidate_icache_by_line x0, x1, x2, x3, 2f
 	mov	x0, xzr
-1:
-	uaccess_ttbr0_disable x1, x2
-	ret
-2:
-	mov	x0, #-EFAULT
+1:	ret
+2:	mov	x0, #-EFAULT
 	b	1b
 ENDPROC(invalidate_icache_range)
 
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..66249fca2092 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -75,7 +75,7 @@  EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(__arch_flush_icache_range);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size)