@@ -37,8 +37,9 @@ void flush_kernel_dcache_page_addr(void *addr);
flush_kernel_dcache_range_asm((start), (start)+(size));
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
-void flush_kernel_vmap_range(void *vaddr, int size);
-void invalidate_kernel_vmap_range(void *vaddr, int size);
+void flush_cache_vmap_vunmap(unsigned long start, unsigned long end);
+#define flush_cache_vmap(start,end) flush_cache_vmap_vunmap(start,end)
+#define flush_cache_vunmap(start,end) flush_cache_vmap_vunmap(start,end)
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
@@ -653,3 +653,15 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+
+/*
+ * flush_cache_vmap_vunmap - cache maintenance for kernel vmap()/vunmap()
+ * @start: start of the virtual range being mapped or unmapped
+ * @end: end of the virtual range being mapped or unmapped
+ *
+ * Backs both flush_cache_vmap() and flush_cache_vunmap().  Note that
+ * @start and @end are currently unused: the whole TLB and cache are
+ * flushed regardless of the range.
+ *
+ * Must not be called with interrupts disabled on SMP builds — the
+ * BUG_ON below enforces this (presumably because the global flushes
+ * rely on cross-CPU work; TODO confirm against arch implementation).
+ */
+void flush_cache_vmap_vunmap(unsigned long start, unsigned long end)
+{
+ BUG_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled());
+
+ /*
+ * Flush the TLB first: with the translations gone, no new lines
+ * for the range can be moved into the cache while we flush it.
+ */
+ flush_tlb_all();
+
+ /* Flush the entire cache to remove all aliases */
+ flush_cache_all();
+}
+EXPORT_SYMBOL(flush_cache_vmap_vunmap);
Revise the flush_cache_vmap and flush_cache_vunmap macros to use flush_cache_vmap_vunmap. The new helper flushes the TLB to inhibit cache move-in, and on SMP builds it checks (via BUG_ON) that interrupts are enabled, since they must not be disabled when it is called.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
---