@@ -633,16 +619,23 @@ void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
+	unsigned long physaddr;
 
+	flush_tlb_kernel_range(start, end);
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
-		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
 		return;
 	}
 
-	flush_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	preempt_disable();
+	while (start <= end) {
+		physaddr = lpa(start);
+		if (physaddr)
+			flush_dcache_page_asm(physaddr, start);
+		start += PAGE_SIZE;
+	}
+	preempt_enable();
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
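The per-page loop above only flushes pages that are actually mapped: lpa() is expected to return 0 when no translation exists for the virtual address. A minimal sketch of such a helper, assuming it wraps the PA-RISC "lpa" (load physical address) instruction and relies on the kernel's usual exception-table fixup so the no-translation trap yields 0 (the in-tree definition may differ), is:

/*
 * Sketch only, not the in-tree definition: translate a kernel virtual
 * address with the PA-RISC "lpa" instruction.  If no translation
 * exists, the instruction traps; the exception-table entry (assumed to
 * follow the kernel's standard fixup pattern) skips it, so the
 * pre-loaded 0 is returned for unmapped addresses.
 */
#define lpa(va)	({						\
	unsigned long pa;					\
	__asm__ __volatile__(					\
		"	copy	%%r0,%0\n"			\
		"8:	lpa	%%r0(%1),%0\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY(8b, 9b)		\
		: "=&r" (pa)					\
		: "r" (va)					\
		: "memory");					\
	pa;							\
})

This is only meant to illustrate why the "if (physaddr)" test in the loop is enough to skip unmapped pages.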
@@ -650,15 +643,22 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
+	unsigned long physaddr;
 
+	flush_tlb_kernel_range(start, end);
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
-		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
 		return;
 	}
 
-	purge_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	preempt_disable();
+	while (start <= end) {
+		physaddr = lpa(start);
+		if (physaddr)
+			purge_dcache_page_asm(physaddr, start);
+		start += PAGE_SIZE;
+	}
+	preempt_enable();
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
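For reference, these two exports bracket I/O on vmap()ed buffers: flush before a device reads data the CPU wrote through the mapping, invalidate before the CPU reads data a device wrote into the underlying pages. A rough usage sketch follows; do_device_write()/do_device_read() are made-up placeholders for a real driver's I/O path, and the header providing the flush declarations varies by kernel version.

#include <linux/vmalloc.h>	/* vmap(), vunmap(), VM_MAP */
#include <linux/highmem.h>	/* flush/invalidate_kernel_vmap_range() in many versions */

/* Placeholders standing in for a driver's submission/completion path. */
extern void do_device_write(void *buf, int len);
extern void do_device_read(void *buf, int len);

static void vmap_io_example(struct page **pages, unsigned int nr_pages, int len)
{
	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

	if (!buf)
		return;

	/* CPU filled the buffer through the vmap alias: write dirty
	 * lines back to the physical pages before the device reads. */
	flush_kernel_vmap_range(buf, len);
	do_device_write(buf, len);

	/* The device placed new data in the physical pages: drop any
	 * lines cached through the alias before the CPU reads them. */
	do_device_read(buf, len);
	invalidate_kernel_vmap_range(buf, len);

	vunmap(buf);
}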
Cache move-in for virtual accesses is controlled by the TLB. Thus, we must
generally purge TLB entries before flushing. The flush routines must use
TLB entries that inhibit cache move-in.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
---