
[v3,10/18] arm64: __inval_dcache_area to take end parameter instead of size

Message ID 20210520124406.2731873-11-tabba@google.com (mailing list archive)
State New, archived
Series Tidy up cache.S

Commit Message

Fuad Tabba May 20, 2021, 12:43 p.m. UTC
To be consistent with other functions with similar names and
functionality in cacheflush.h, cache.S, and cachetlb.rst, change
__inval_dcache_area to specify the range in terms of start and
end, as opposed to start and size.

Because the code is shared with __dma_inv_area, this changes the
parameters of that function as well. However, __dma_inv_area is local to
cache.S, so no other users are affected.

No functional change intended.

Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/cacheflush.h |  2 +-
 arch/arm64/kernel/head.S            |  5 +----
 arch/arm64/mm/cache.S               | 16 +++++++++-------
 arch/arm64/mm/flush.c               |  2 +-
 4 files changed, 12 insertions(+), 13 deletions(-)
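
For illustration, the new calling convention from C looks roughly like
this (a sketch; example_buf and example_len are hypothetical names, not
taken from the patch):

	extern void __inval_dcache_area(unsigned long start, unsigned long end);

	void example(void *example_buf, size_t example_len)
	{
		unsigned long start = (unsigned long)example_buf;

		/* Before this patch: __inval_dcache_area(example_buf, example_len); */
		__inval_dcache_area(start, start + example_len);
	}

This mirrors the conversion the patch applies to arch_invalidate_pmem()
in arch/arm64/mm/flush.c.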

Comments

Mark Rutland May 20, 2021, 3:46 p.m. UTC | #1
On Thu, May 20, 2021 at 01:43:58PM +0100, Fuad Tabba wrote:
> To be consistent with other functions with similar names and
> functionality in cacheflush.h, cache.S, and cachetlb.rst, change
> __inval_dcache_area to specify the range in terms of start and
> end, as opposed to start and size.
> 
> Because the code is shared with __dma_inv_area, this changes the
> parameters of that function as well. However, __dma_inv_area is local to
> cache.S, so no other users are affected.
> 
> No functional change intended.
> 
> Reported-by: Will Deacon <will@kernel.org>
> Signed-off-by: Fuad Tabba <tabba@google.com>

All the conversions below look correct to me, and judging by a grep of
the kernel tree there are no stale callers. I see the ADD->SUB dance in
__dma_map_area() will be undone in a subsequent patch. So:

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.
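
The ADD->SUB dance: with the new convention, __dma_map_area computes
the end address up front for the __dma_inv_area tail call, but
__dma_clean_area still takes a size, so the clean path converts back.
A C paraphrase of the patched assembly (a sketch only; the real
implementation is the assembly in cache.S below):

	void __dma_map_area(unsigned long start, size_t size, int dir)
	{
		unsigned long end = start + size;	/* the ADD */

		if (dir == DMA_FROM_DEVICE) {
			__dma_inv_area(start, end);	/* now takes (start, end) */
			return;
		}
		__dma_clean_area(start, end - start);	/* the SUB: back to a size */
	}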


Patch

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index a586afa84172..157234706817 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -59,7 +59,7 @@ 
 extern void __flush_icache_range(unsigned long start, unsigned long end);
 extern void invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
-extern void __inval_dcache_area(void *addr, size_t len);
+extern void __inval_dcache_area(unsigned long start, unsigned long end);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pop(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 96873dfa67fd..8df0ac8d9123 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -117,7 +117,7 @@  SYM_CODE_START_LOCAL(preserve_boot_args)
 	dmb	sy				// needed before dc ivac with
 						// MMU off
 
-	mov	x1, #0x20			// 4 x 8 bytes
+	add	x1, x0, #0x20			// 4 x 8 bytes
 	b	__inval_dcache_area		// tail call
 SYM_CODE_END(preserve_boot_args)
 
@@ -268,7 +268,6 @@  SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 	adrp	x0, init_pg_dir
 	adrp	x1, init_pg_end
-	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
 	/*
@@ -382,12 +381,10 @@  SYM_FUNC_START_LOCAL(__create_page_tables)
 
 	adrp	x0, idmap_pg_dir
 	adrp	x1, idmap_pg_end
-	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
 	adrp	x0, init_pg_dir
 	adrp	x1, init_pg_end
-	sub	x1, x1, x0
 	bl	__inval_dcache_area
 
 	ret	x28
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 80da4b8718b6..5170d9ab450a 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -138,25 +138,24 @@  alternative_else_nop_endif
 SYM_FUNC_END(__clean_dcache_area_pou)
 
 /*
- *	__inval_dcache_area(kaddr, size)
+ *	__inval_dcache_area(start, end)
  *
- * 	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * 	Ensure that any D-cache lines for the interval [start, end)
  * 	are invalidated. Any partial lines at the ends of the interval are
  *	also cleaned to PoC to prevent data loss.
  *
- *	- kaddr   - kernel address
- *	- size    - size in question
+ *	- start   - kernel start address of region
+ *	- end     - kernel end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_inv_area)
 SYM_FUNC_START_PI(__inval_dcache_area)
 	/* FALLTHROUGH */
 
 /*
- *	__dma_inv_area(start, size)
+ *	__dma_inv_area(start, end)
  *	- start   - virtual start address of region
- *	- size    - size in question
+ *	- end     - virtual end address of region
  */
-	add	x1, x1, x0
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	tst	x1, x3				// end cache line aligned?
@@ -237,8 +236,10 @@  SYM_FUNC_END_PI(__dma_flush_area)
  *	- dir	- DMA direction
  */
 SYM_FUNC_START_PI(__dma_map_area)
+	add	x1, x0, x1
 	cmp	w2, #DMA_FROM_DEVICE
 	b.eq	__dma_inv_area
+	sub	x1, x1, x0
 	b	__dma_clean_area
 SYM_FUNC_END_PI(__dma_map_area)
 
@@ -249,6 +250,7 @@  SYM_FUNC_END_PI(__dma_map_area)
  *	- dir	- DMA direction
  */
 SYM_FUNC_START_PI(__dma_unmap_area)
+	add	x1, x0, x1
 	cmp	w2, #DMA_TO_DEVICE
 	b.ne	__dma_inv_area
 	ret
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..4e3505c2bea6 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -88,7 +88,7 @@  EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
-	__inval_dcache_area(addr, size);
+	__inval_dcache_area((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif
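
As a footnote on the preserve_boot_args hunk: the tail call into
__inval_dcache_area now needs x1 to hold the end address, so the 0x20
immediate (4 x 8 bytes) is added to the start address in x0 instead of
being loaded as a size. In C terms (a sketch; boot_args is assumed here
to be the 4-element u64 array the code preserves):

	unsigned long start = (unsigned long)boot_args;

	__inval_dcache_area(start, start + 4 * sizeof(u64));	/* 0x20 bytes */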