| Message ID | 20210520124406.2731873-13-tabba@google.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Tidy up cache.S |
On Thu, May 20, 2021 at 01:44:00PM +0100, Fuad Tabba wrote:
> To be consistent with other functions with similar names and
> functionality in cacheflush.h, cache.S, and cachetlb.rst, change
> to specify the range in terms of start and end, as opposed to
> start and size.
>
> No functional change intended.
>
> Reported-by: Will Deacon <will@kernel.org>
> Signed-off-by: Fuad Tabba <tabba@google.com>

Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> [...]
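The full patch follows. The conversion it applies at each call site is mechanical: a (pointer, size) pair becomes a [start, end) pair of unsigned longs. As a minimal sketch of the before/after shape of a call site (the struct and function below are hypothetical illustrations, not code from the patch):

```c
/* Hypothetical caller: publish a descriptor to the Point of Coherency
 * so a CPU running with its MMU off can read it. */
struct boot_info {
	unsigned long entry;
	unsigned long stack;
};

static struct boot_info boot_data;

static void publish_boot_info(void)
{
	/* Old prototype: __flush_dcache_area(&boot_data, sizeof(boot_data)); */

	/* New prototype takes the range as [start, end): */
	__flush_dcache_area((unsigned long)&boot_data,
			    (unsigned long)&boot_data + sizeof(boot_data));
}
```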
To be consistent with other functions with similar names and
functionality in cacheflush.h, cache.S, and cachetlb.rst, change
to specify the range in terms of start and end, as opposed to
start and size.

No functional change intended.

Reported-by: Will Deacon <will@kernel.org>
Signed-off-by: Fuad Tabba <tabba@google.com>

```diff
---
 arch/arm64/include/asm/arch_gicv3.h |  3 ++-
 arch/arm64/include/asm/cacheflush.h |  8 ++++----
 arch/arm64/include/asm/efi.h        |  2 +-
 arch/arm64/include/asm/kvm_mmu.h    |  3 ++-
 arch/arm64/kernel/hibernate.c       | 18 +++++++++++-------
 arch/arm64/kernel/idreg-override.c  |  3 ++-
 arch/arm64/kernel/kaslr.c           | 12 +++++++++---
 arch/arm64/kernel/machine_kexec.c   | 20 +++++++++++++-------
 arch/arm64/kernel/smp.c             |  8 ++++++--
 arch/arm64/kernel/smp_spin_table.c  |  7 ++++---
 arch/arm64/kvm/hyp/nvhe/cache.S     |  1 -
 arch/arm64/kvm/hyp/nvhe/setup.c     |  3 ++-
 arch/arm64/kvm/hyp/pgtable.c        | 13 ++++++++++---
 arch/arm64/mm/cache.S               |  9 ++++-----
 14 files changed, 70 insertions(+), 40 deletions(-)

diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 934b9be582d2..ed1cc9d8e6df 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
 #define gic_read_lpir(c)		readq_relaxed(c)
 #define gic_write_lpir(v, c)		writeq_relaxed(v, c)
 
-#define gic_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l)	\
+	__flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)		readq_relaxed(c)
 #define gits_write_baser(v, c)		writeq_relaxed(v, c)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 157234706817..695f88864784 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -50,15 +50,15 @@
  *		- start  - virtual start address
  *		- end    - virtual end address
  *
- *	__flush_dcache_area(kaddr, size)
+ *	__flush_dcache_area(start, end)
  *
  *		Ensure that the data held in page is written back.
- *		- kaddr  - page address
- *		- size   - region size
+ *		- start  - virtual start address
+ *		- end    - virtual end address
  */
 extern void __flush_icache_range(unsigned long start, unsigned long end);
 extern void invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
+extern void __flush_dcache_area(unsigned long start, unsigned long end);
 extern void __inval_dcache_area(unsigned long start, unsigned long end);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pop(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 3578aba9c608..0ae2397076fd 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
 
 static inline void efi_capsule_flush_cache_range(void *addr, int size)
 {
-	__flush_dcache_area(addr, size);
+	__flush_dcache_area((unsigned long)addr, (unsigned long)addr + size);
 }
 
 #endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 25ed956f9af1..33293d5855af 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base,
 
 struct kvm;
 
-#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l)	\
+	__flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index b1cef371df2b..b40ddce71507 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	return 0;
 }
 
-#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
-
 #ifdef CONFIG_ARM64_MTE
 
 static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
 		ret = swsusp_save();
 	} else {
 		/* Clean kernel core startup/idle code to PoC*/
-		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
-		dcache_clean_range(__idmap_text_start, __idmap_text_end);
+		__flush_dcache_area((unsigned long)__mmuoff_data_start,
+				    (unsigned long)__mmuoff_data_end);
+		__flush_dcache_area((unsigned long)__idmap_text_start,
+				    (unsigned long)__idmap_text_end);
 
 		/* Clean kvm setup code to PoC? */
 		if (el2_reset_needed()) {
-			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
-			dcache_clean_range(__hyp_text_start, __hyp_text_end);
+			__flush_dcache_area(
+				(unsigned long)__hyp_idmap_text_start,
+				(unsigned long)__hyp_idmap_text_end);
+			__flush_dcache_area((unsigned long)__hyp_text_start,
+					    (unsigned long)__hyp_text_end);
 		}
 
 		swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
 	 * The hibernate exit text contains a set of el2 vectors, that will
 	 * be executed at el2 with the mmu off in order to reload hyp-stub.
 	 */
-	__flush_dcache_area(hibernate_exit, exit_size);
+	__flush_dcache_area((unsigned long)hibernate_exit,
+			    (unsigned long)hibernate_exit + exit_size);
 
 	/*
 	 * KASLR will cause the el2 vectors to be in a different location in
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index e628c8ce1ffe..3dd515baf526 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
 
 	for (i = 0; i < ARRAY_SIZE(regs); i++) {
 		if (regs[i]->override)
-			__flush_dcache_area(regs[i]->override,
+			__flush_dcache_area((unsigned long)regs[i]->override,
+					    (unsigned long)regs[i]->override +
 					    sizeof(*regs[i]->override));
 	}
 }
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 341342b207f6..49cccd03cb37 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
 	 * we end up running with module randomization disabled.
 	 */
 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
-	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+	__flush_dcache_area((unsigned long)&module_alloc_base,
+			    (unsigned long)&module_alloc_base +
+			    sizeof(module_alloc_base));
 
 	/*
 	 * Try to map the FDT early. If this fails, we simply bail,
@@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void)
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;
 
-	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
-	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+	__flush_dcache_area((unsigned long)&module_alloc_base,
+			    (unsigned long)&module_alloc_base +
+			    sizeof(module_alloc_base));
+	__flush_dcache_area((unsigned long)&memstart_offset_seed,
+			    (unsigned long)&memstart_offset_seed +
+			    sizeof(memstart_offset_seed));
 
 	return offset;
 }
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index a03944fd0cd4..3e79110c8f3a 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -72,7 +72,9 @@ int machine_kexec_post_load(struct kimage *kimage)
	 * For execution with the MMU off, reloc_code needs to be cleaned to the
	 * PoC and invalidated from the I-cache.
	 */
-	__flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
+	__flush_dcache_area((unsigned long)reloc_code,
+			    (unsigned long)reloc_code +
+			    arm64_relocate_new_kernel_size);
 	invalidate_icache_range((uintptr_t)reloc_code,
 				(uintptr_t)reloc_code +
 				arm64_relocate_new_kernel_size);
@@ -106,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
 
 	for (entry = &kimage->head; ; entry++) {
 		unsigned int flag;
-		void *addr;
+		unsigned long addr;
 
 		/* flush the list entries. */
-		__flush_dcache_area(entry, sizeof(kimage_entry_t));
+		__flush_dcache_area((unsigned long)entry,
+				    (unsigned long)entry +
+				    sizeof(kimage_entry_t));
 
 		flag = *entry & IND_FLAGS;
 		if (flag == IND_DONE)
 			break;
 
-		addr = phys_to_virt(*entry & PAGE_MASK);
+		addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
 
 		switch (flag) {
 		case IND_INDIRECTION:
@@ -124,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
 			break;
 		case IND_SOURCE:
 			/* flush the source pages. */
-			__flush_dcache_area(addr, PAGE_SIZE);
+			__flush_dcache_area(addr, addr + PAGE_SIZE);
 			break;
 		case IND_DESTINATION:
 			break;
@@ -151,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
 			kimage->segment[i].memsz,
 			kimage->segment[i].memsz / PAGE_SIZE);
 
-		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
-				    kimage->segment[i].memsz);
+		__flush_dcache_area(
+			(unsigned long)phys_to_virt(kimage->segment[i].mem),
+			(unsigned long)phys_to_virt(kimage->segment[i].mem) +
+			kimage->segment[i].memsz);
 	}
 }
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dcd7041b2b07..5fcdee331087 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -122,7 +122,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	secondary_data.task = idle;
 	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
 	update_cpu_boot_status(CPU_MMU_OFF);
-	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
+	__flush_dcache_area((unsigned long)&secondary_data,
+			    (unsigned long)&secondary_data +
+			    sizeof(secondary_data));
 
 	/* Now bring the CPU into our world */
 	ret = boot_secondary(cpu, idle);
@@ -143,7 +145,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 		pr_crit("CPU%u: failed to come online\n", cpu);
 		secondary_data.task = NULL;
 		secondary_data.stack = NULL;
-		__flush_dcache_area(&secondary_data, sizeof(secondary_data));
+		__flush_dcache_area((unsigned long)&secondary_data,
+				    (unsigned long)&secondary_data +
+				    sizeof(secondary_data));
 		status = READ_ONCE(secondary_data.status);
 		if (status == CPU_MMU_OFF)
 			status = READ_ONCE(__early_cpu_boot_status);
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index c45a83512805..58d804582a35 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
 	unsigned long size = sizeof(secondary_holding_pen_release);
 
 	secondary_holding_pen_release = val;
-	__flush_dcache_area(start, size);
+	__flush_dcache_area((unsigned long)start, (unsigned long)start + size);
 }
 
 
@@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
	 * the boot protocol.
	 */
 	writeq_relaxed(pa_holding_pen, release_addr);
-	__flush_dcache_area((__force void *)release_addr,
-			    sizeof(*release_addr));
+	__flush_dcache_area((__force unsigned long)release_addr,
+			    (__force unsigned long)release_addr +
+			    sizeof(*release_addr));
 
 	/*
 	 * Send an event to wake up the secondary CPU.
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S
index 3bcfa3cac46f..36cef6915428 100644
--- a/arch/arm64/kvm/hyp/nvhe/cache.S
+++ b/arch/arm64/kvm/hyp/nvhe/cache.S
@@ -8,7 +8,6 @@
 #include <asm/alternative.h>
 
 SYM_FUNC_START_PI(__flush_dcache_area)
-	add	x1, x0, x1
 	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END_PI(__flush_dcache_area)
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 7488f53b0aa2..5dffe928f256 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -134,7 +134,8 @@ static void update_nvhe_init_params(void)
 	for (i = 0; i < hyp_nr_cpus; i++) {
 		params = per_cpu_ptr(&kvm_init_params, i);
 		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
-		__flush_dcache_area(params, sizeof(*params));
+		__flush_dcache_area((unsigned long)params,
+				    (unsigned long)params + sizeof(*params));
 	}
 }
 
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c37c1dc4feaf..10d2f04013d4 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -839,8 +839,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
 	if (need_flush) {
-		__flush_dcache_area(kvm_pte_follow(pte, mm_ops),
-				    kvm_granule_size(level));
+		kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+		__flush_dcache_area((unsigned long)pte_follow,
+				    (unsigned long)pte_follow +
+				    kvm_granule_size(level));
 	}
 
 	if (childp)
@@ -988,11 +991,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	struct kvm_pgtable *pgt = arg;
 	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
 	kvm_pte_t pte = *ptep;
+	kvm_pte_t *pte_follow;
 
 	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
 		return 0;
 
-	__flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
+	pte_follow = kvm_pte_follow(pte, mm_ops);
+	__flush_dcache_area((unsigned long)pte_follow,
+			    (unsigned long)pte_follow +
+			    kvm_granule_size(level));
 	return 0;
 }
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 3b5461a32b85..35abc8d77c4e 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -106,16 +106,15 @@ alternative_else_nop_endif
 SYM_FUNC_END(invalidate_icache_range)
 
 /*
- *	__flush_dcache_area(kaddr, size)
+ *	__flush_dcache_area(start, end)
 *
- *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	Ensure that any D-cache lines for the interval [start, end)
 *	are cleaned and invalidated to the PoC.
 *
- *	- kaddr   - kernel address
- *	- size    - size in question
+ *	- start   - virtual start address of region
+ *	- end     - virtual end address of region
 */
 SYM_FUNC_START_PI(__flush_dcache_area)
-	add	x1, x0, x1
 	dcache_by_line_op civac, sy, x0, x1, x2, x3
 	ret
 SYM_FUNC_END_PI(__flush_dcache_area)
```
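With the end address now arriving directly in x1, both assembly implementations feed their arguments straight into dcache_by_line_op, which is why the `add x1, x0, x1` instructions drop out. As a rough C model of what that macro does for the civac/sy case over [start, end) — assuming a fixed 64-byte D-cache line for illustration, whereas the real macro reads the line size from CTR_EL0:

```c
/* Rough C model of "dcache_by_line_op civac, sy" over [start, end).
 * Assumes a 64-byte D-cache line; the real macro derives the line
 * size from CTR_EL0 at runtime. Sketch only, not kernel code. */
static void clean_and_invalidate_to_poc(unsigned long start, unsigned long end)
{
	const unsigned long line = 64;
	unsigned long addr = start & ~(line - 1);	/* align down to a line */

	for (; addr < end; addr += line)
		/* Clean and invalidate this line by VA to the PoC. */
		asm volatile("dc civac, %0" : : "r" (addr) : "memory");

	asm volatile("dsb sy" : : : "memory");	/* wait for completion */
}
```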