@@ -385,39 +385,38 @@ alternative_endif
/*
* Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
*
* op: operation passed to dc instruction
 * domain: domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * Corrupts: start, end, tmp1, tmp2
*/
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2
dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
+ bic \start, \start, \tmp2
9998:
.ifc \op, cvau
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvac
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvap
- sys 3, c7, c12, 1, \kaddr // dc cvap
+ sys 3, c7, c12, 1, \start // dc cvap
.else
.ifc \op, cvadp
- sys 3, c7, c13, 1, \kaddr // dc cvadp
+ sys 3, c7, c13, 1, \start // dc cvadp
.else
- dc \op, \kaddr
+ dc \op, \start
.endif
.endif
.endif
.endif
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
+ add \start, \start, \tmp1
+ cmp \start, \end
b.lo 9998b
dsb \domain
.endm
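For reference, a rough C-level sketch of the loop this macro now implements over the half-open [start, end) interval; dcache_line_size_bytes(), dc_line_op() and dsb_barrier() are illustrative stand-ins for the CTR_EL0 line-size read, the dc instruction and the dsb, not real kernel helpers:

/*
 * Illustrative C equivalent of dcache_by_line_op with the new
 * (start, end) interface; the helpers below are hypothetical.
 */
static void dcache_by_line(unsigned long start, unsigned long end)
{
        unsigned long line = dcache_line_size_bytes();

        start &= ~(line - 1);           /* bic: align down to a cache line */
        do {
                dc_line_op(start);      /* one dc op per cache line */
                start += line;
        } while (start < end);          /* cmp + b.lo 9998b */
        dsb_barrier();                  /* complete the maintenance */
}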
@@ -8,6 +8,7 @@
#include <asm/alternative.h>
SYM_FUNC_START_PI(__flush_dcache_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__flush_dcache_area)
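The extra add here converts the (start, size) arguments this function still receives in x0/x1 into the exclusive end that the macro now expects; the same one-line change recurs in the cache.S hunks below. A hedged C-level illustration, where __dcache_civac_range() is a hypothetical stand-in for the macro invocation:

/* Illustrative only: a (start, size) entry point feeding the (start, end)
 * macro; __dcache_civac_range() is a hypothetical stand-in. */
void flush_dcache_area_example(unsigned long start, unsigned long size)
{
        unsigned long end = start + size;       /* add x1, x0, x1 */

        __dcache_civac_range(start, end);       /* dc civac per line, then dsb sy */
}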
@@ -123,6 +123,7 @@ SYM_FUNC_END(invalidate_icache_range)
* - size - size in question
*/
SYM_FUNC_START_PI(__flush_dcache_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__flush_dcache_area)
@@ -141,6 +142,7 @@ alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
+ add x1, x0, x1
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
SYM_FUNC_END(__clean_dcache_area_pou)
@@ -202,6 +204,7 @@ SYM_FUNC_START_PI(__clean_dcache_area_poc)
* - start - virtual start address of region
* - size - size in question
*/
+ add x1, x0, x1
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__clean_dcache_area_poc)
@@ -220,6 +223,7 @@ SYM_FUNC_START_PI(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
b __clean_dcache_area_poc
alternative_else_nop_endif
+ add x1, x0, x1
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__clean_dcache_area_pop)
@@ -233,6 +237,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
* - size - size in question
*/
SYM_FUNC_START_PI(__dma_flush_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__dma_flush_area)
To be consistent with other functions with similar names and
functionality in cacheflush.h, cache.S, and cachetlb.rst, change to
specify the range in terms of start and end, as opposed to start and
size. No functional change intended.

Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/assembler.h | 27 +++++++++++++--------------
 arch/arm64/kvm/hyp/nvhe/cache.S    |  1 +
 arch/arm64/mm/cache.S              |  5 +++++
 3 files changed, 19 insertions(+), 14 deletions(-)
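For a concrete feel of the half-open [start, end) interval, a small standalone C example (illustrative numbers; a 64-byte cache line size is assumed) that prints the lines such a maintenance call would visit:

#include <stdio.h>

/* Illustrative only: which cache lines a [start, start + size) region
 * touches, assuming a 64-byte line size. */
int main(void)
{
        unsigned long start = 0x1010, size = 0x40, line = 64;
        unsigned long end = start + size;       /* exclusive end, as the macro now takes */

        for (start &= ~(line - 1); start < end; start += line)
                printf("dc op on line 0x%lx\n", start); /* prints 0x1000 and 0x1040 */

        return 0;
}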