@@ -26,7 +26,9 @@ endif
define arch_elf_check =
endef
-cstart.o = $(TEST_DIR)/cstart.o
+asmobjs = $(TEST_DIR)/cstart.o
+asmobjs += lib/arm/cache.o
+
cflatobjs += lib/arm/spinlock.o
cflatobjs += lib/arm/processor.o
cflatobjs += lib/arm/stack.o
@@ -21,8 +21,10 @@ define arch_elf_check =
$(error $(1) has unsupported reloc types))
endef
-cstart.o = $(TEST_DIR)/cstart64.o
+asmobjs = $(TEST_DIR)/cstart64.o
+asmobjs += lib/arm64/cache.o
cflatobjs += lib/arm64/stack.o
+
cflatobjs += lib/arm64/processor.o
cflatobjs += lib/arm64/spinlock.o
cflatobjs += lib/arm64/gic-v3-its.o lib/arm64/gic-v3-its-cmd.o
@@ -71,7 +71,7 @@ FLATLIBS = $(libcflat) $(LIBFDT_archive) $(libeabi)
ifeq ($(CONFIG_EFI),y)
%.so: EFI_LDFLAGS += -defsym=EFI_SUBSYSTEM=0xa --no-undefined
-%.so: %.o $(FLATLIBS) $(SRCDIR)/arm/efi/elf_aarch64_efi.lds $(cstart.o)
+%.so: %.o $(FLATLIBS) $(SRCDIR)/arm/efi/elf_aarch64_efi.lds $(asmobjs)
$(CC) $(CFLAGS) -c -o $(@:.so=.aux.o) $(SRCDIR)/lib/auxinfo.c \
-DPROGNAME=\"$(@:.so=.efi)\" -DAUXFLAGS=$(AUXFLAGS)
$(LD) $(EFI_LDFLAGS) -o $@ -T $(SRCDIR)/arm/efi/elf_aarch64_efi.lds \
@@ -91,7 +91,7 @@ ifeq ($(CONFIG_EFI),y)
-O binary $^ $@
else
%.elf: LDFLAGS += $(arch_LDFLAGS)
-%.elf: %.o $(FLATLIBS) $(SRCDIR)/arm/flat.lds $(cstart.o)
+%.elf: %.o $(FLATLIBS) $(SRCDIR)/arm/flat.lds $(asmobjs)
$(CC) $(CFLAGS) -c -o $(@:.elf=.aux.o) $(SRCDIR)/lib/auxinfo.c \
-DPROGNAME=\"$(@:.elf=.flat)\" -DAUXFLAGS=$(AUXFLAGS)
$(LD) $(LDFLAGS) -o $@ -T $(SRCDIR)/arm/flat.lds \
@@ -113,4 +113,4 @@ arm_clean: asm_offsets_clean
$(TEST_DIR)/.*.d $(TEST_DIR)/efi/.*.d lib/arm/.*.d
generated-files = $(asm-offsets)
-$(tests-all:.$(exe)=.o) $(cstart.o) $(cflatobjs): $(generated-files)
+$(tests-all:.$(exe)=.o) $(asmobjs) $(cflatobjs): $(generated-files)
@@ -41,7 +41,11 @@
.ifc \op, dccimvac
mcr p15, 0, \addr, c7, c14, 1
.else
- .err
+ .ifc \op, dccmvac
+ mcr p15, 0, \addr, c7, c10, 1
+ .else
+ .err
+ .endif
.endif
add \addr, \addr, \tmp1
cmp \addr, \end
new file mode 100644
@@ -0,0 +1 @@
+#include "../../arm64/asm/cacheflush.h"
@@ -45,12 +45,6 @@ static inline void flush_tlb_page(unsigned long vaddr)
isb();
}
-static inline void flush_dcache_addr(unsigned long vaddr)
-{
- /* DCCIMVAC */
- asm volatile("mcr p15, 0, %0, c7, c14, 1" :: "r" (vaddr));
-}
-
#include <asm/mmu-api.h>
#endif /* _ASMARM_MMU_H_ */
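
With flush_dcache_addr() removed, callers use the clean + invalidate helper
from the new cacheflush.h instead. A minimal sketch of the conversion, with
an illustrative caller (not a function touched by this series):

	#include <asm/cacheflush.h>

	static void share_addr_with_noncoherent_observer(unsigned long addr)
	{
		/*
		 * Was: flush_dcache_addr(addr). Clean + invalidate to PoC;
		 * a pure invalidate could discard unrelated data that shares
		 * the cache line.
		 */
		dcache_clean_inval_addr_poc(addr);
	}
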
new file mode 100644
@@ -0,0 +1,89 @@
+/*
+ * Based on arch/arm64/mm/cache.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012, 2022 ARM Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <asm/assembler.h>
+
+/*
+ * dcache_inval_poc(start, end)
+ *
+ * Ensure that any D-cache lines for the interval [start, end) are
+ * invalidated. Any partial lines at the ends of the interval are cleaned
+ * and invalidated to PoC instead to prevent data loss.
+ *
+ * - start - start address of region
+ * - end - end address of region
+ */
+.global dcache_inval_poc
+dcache_inval_poc:
+ dmb sy
+ dcache_line_size r2, r3
+ sub r3, r2, #1
+ tst r1, r3 // end cache line aligned?
+ bic r1, r1, r3
+ beq 1f
+ // DCCIMVAC
+ mcr p15, 0, r1, c7, c14, 1 // clean + invalidate end cache line
+1: tst r0, r3 // start cache line aligned?
+ bic r0, r0, r3
+ beq 2f
+ mcr p15, 0, r0, c7, c14, 1 // clean + invalidate start cache line
+ b 3f
+ // DCIMVAC
+2: mcr p15, 0, r0, c7, c6, 1 // invalidate current cache line
+3: add r0, r0, r2
+ cmp r0, r1
+ blo 2b
+ dsb sy
+ mov pc, lr
+
+/*
+ * dcache_clean_poc(start, end)
+ *
+ * Ensure that any D-cache lines for the interval [start, end)
+ * are cleaned to the PoC.
+ *
+ * - start - start address of region
+ * - end - end address of region
+ */
+.global dcache_clean_poc
+dcache_clean_poc:
+ dmb sy
+ dcache_by_line_op dccmvac, sy, r0, r1, r2, r3
+ mov pc, lr
+
+/*
+ * dcache_clean_addr_poc(addr)
+ *
+ * Ensure that the D-cache line for address addr is cleaned to the PoC.
+ *
+ * - addr - the address
+ */
+.global dcache_clean_addr_poc
+dcache_clean_addr_poc:
+ dmb sy
+ // DCCMVAC
+ mcr p15, 0, r0, c7, c10, 1
+ dsb sy
+ mov pc, lr
+
+/*
+ * dcache_clean_inval_addr_poc(addr)
+ *
+ * Ensure that the D-cache line for address addr is cleaned and invalidated
+ * to the PoC.
+ *
+ * - addr - the address
+ */
+.global dcache_clean_inval_addr_poc
+dcache_clean_inval_addr_poc:
+ dmb sy
+ // DCCIMVAC
+ mcr p15, 0, r0, c7, c14, 1
+ dsb sy
+ mov pc, lr
new file mode 100644
@@ -0,0 +1,38 @@
+#ifndef _ASMARM64_CACHEFLUSH_H_
+#define _ASMARM64_CACHEFLUSH_H_
+/*
+ * Based on arch/arm64/include/asm/cacheflush.h
+ *
+ * Copyright (C) 1999-2002 Russell King.
+ * Copyright (C) 2012, 2022 ARM Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <libcflat.h>	/* for assert() */
+#include <asm/page.h>
+
+extern void dcache_clean_addr_poc(unsigned long addr);
+/*
+ * Invalidating a specific address is dangerous, because it means invalidating
+ * everything that shares the same cache line. Do clean and invalidate instead,
+ * as the clean is harmless.
+ */
+extern void dcache_clean_inval_addr_poc(unsigned long addr);
+
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+
+static inline void dcache_inval_page_poc(unsigned long page_addr)
+{
+ assert(PAGE_ALIGN(page_addr) == page_addr);
+ dcache_inval_poc(page_addr, page_addr + PAGE_SIZE);
+}
+
+static inline void dcache_clean_page_poc(unsigned long page_addr)
+{
+ assert(PAGE_ALIGN(page_addr) == page_addr);
+ dcache_clean_poc(page_addr, page_addr + PAGE_SIZE);
+}
+
+#endif /* _ASMARM64_CACHEFLUSH_H_ */
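
For reference, a sketch of how the page-granule helpers are expected to be
used around a buffer shared with a non-coherent observer (the buffer and
function names are illustrative):

	#include <asm/cacheflush.h>
	#include <asm/page.h>

	static char buf[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));

	static void exchange_with_observer(void)
	{
		/* Make CPU writes visible at the PoC before handing off. */
		dcache_clean_page_poc((unsigned long)buf);

		/* ... the observer updates buf ... */

		/* Discard stale lines before reading the observer's data. */
		dcache_inval_page_poc((unsigned long)buf);
	}
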
@@ -28,11 +28,6 @@ static inline void flush_tlb_page(unsigned long vaddr)
isb();
}
-static inline void flush_dcache_addr(unsigned long vaddr)
-{
- asm volatile("dc civac, %0" :: "r" (vaddr));
-}
-
#include <asm/mmu-api.h>
#endif /* _ASMARM64_MMU_H_ */
new file mode 100644
@@ -0,0 +1,85 @@
+/*
+ * Based on arch/arm64/mm/cache.S
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012, 2022 ARM Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <asm/assembler.h>
+
+/*
+ * dcache_inval_poc(start, end)
+ *
+ * Ensure that any D-cache lines for the interval [start, end) are
+ * invalidated. Any partial lines at the ends of the interval are cleaned
+ * and invalidated to PoC instead to prevent data loss.
+ *
+ * - start - start address of region
+ * - end - end address of region
+ */
+.global dcache_inval_poc
+dcache_inval_poc:
+ dmb sy
+ raw_dcache_line_size x2, x3
+ sub x3, x2, #1
+ tst x1, x3 // end cache line aligned?
+ bic x1, x1, x3
+ b.eq 1f
+ dc civac, x1 // clean + invalidate end cache line
+1: tst x0, x3 // start cache line aligned?
+ bic x0, x0, x3
+ b.eq 2f
+ dc civac, x0 // clean + invalidate start cache line
+ b 3f
+2: dc ivac, x0 // invalidate current cache line
+3: add x0, x0, x2
+ cmp x0, x1
+ b.lo 2b
+ dsb sy
+ ret
+
+/*
+ * dcache_clean_poc(start, end)
+ *
+ * Ensure that any D-cache lines for the interval [start, end)
+ * are cleaned to the PoC.
+ *
+ * - start - start address of region
+ * - end - end address of region
+ */
+.global dcache_clean_poc
+dcache_clean_poc:
+ dmb sy
+ dcache_by_line_op cvac, sy, x0, x1, x2, x3
+ ret
+
+/*
+ * dcache_clean_addr_poc(addr)
+ *
+ * Ensure that the D-cache line for address addr is cleaned to the PoC.
+ *
+ * - addr - the address
+ */
+.global dcache_clean_addr_poc
+dcache_clean_addr_poc:
+ dmb sy
+ dc cvac, x0
+ dsb sy
+ ret
+
+/*
+ * dcache_clean_inval_addr_poc(addr)
+ *
+ * Ensure that the D-cache line for address addr is cleaned and invalidated
+ * to the PoC.
+ *
+ * - addr - the address
+ */
+.global dcache_clean_inval_addr_poc
+dcache_clean_inval_addr_poc:
+ dmb sy
+ dc civac, x0
+ dsb sy
+ ret
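
The partial-line handling in dcache_inval_poc() is easiest to follow in C.
A sketch, where dc_civac()/dc_ivac() stand in for the per-line DC CIVAC/IVAC
operations and the line size is a parameter (the assembly reads it from
CTR/CTR_EL0 via the line-size macros):

	static void dcache_inval_poc_model(unsigned long start,
					   unsigned long end,
					   unsigned long line)
	{
		unsigned long mask = line - 1;

		/* Partial line at the end: clean + invalidate to avoid
		 * throwing away data outside [start, end). */
		if (end & mask)
			dc_civac(end & ~mask);
		end &= ~mask;

		/* Same for a partial line at the start. */
		if (start & mask) {
			dc_civac(start & ~mask);
			start += line;
		}
		start &= ~mask;

		/* Lines fully inside the range can be invalidated outright. */
		for (; start < end; start += line)
			dc_ivac(start);
	}
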