@@ -1281,6 +1281,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 {
 	struct mm_struct *mm = tlb->mm;
 	int force_flush = 0;
+	int locked_flush = 0;
 	int rss[NR_MM_COUNTERS];
 	spinlock_t *ptl;
 	pte_t *start_pte;
@@ -1322,6 +1323,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (!PageAnon(page)) {
 				if (pte_dirty(ptent)) {
 					force_flush = 1;
+					locked_flush = 1;
 					set_page_dirty(page);
 				}
 				if (pte_young(ptent) &&
@@ -1384,7 +1386,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_leave_lazy_mmu_mode();
 
 	/* Do the actual TLB flush before dropping ptl */
-	if (force_flush)
+	if (locked_flush)
 		tlb_flush_mmu_tlbonly(tlb);
 	pte_unmap_unlock(start_pte, ptl);
 
@@ -1395,8 +1397,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	 * memory too. Restart if we didn't do everything.
 	 */
 	if (force_flush) {
-		force_flush = 0;
+		if (!locked_flush)
+			tlb_flush_mmu_tlbonly(tlb);
 		tlb_flush_mmu_free(tlb);
+
+		force_flush = 0;
+		locked_flush = 0;
 		if (addr != end)
 			goto again;
 	}
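
To make the intent of the two flags easier to follow, here is a minimal userspace
sketch of the control-flow split the patch introduces. It is plain C, not kernel
code, and every helper name in it (tlb_flush, tlb_free_batched, ptl_unlock,
zap_range_sketch) is an illustrative stand-in rather than the kernel API:
locked_flush covers only the flush that must happen before the page table lock is
dropped (the dirty file-backed PTE case), while a batch-full force_flush can be
flushed after the unlock.

/* Sketch only: models when the TLB flush happens relative to the lock. */
#include <stdbool.h>
#include <stdio.h>

static void tlb_flush(void)        { puts("flush TLB"); }
static void tlb_free_batched(void) { puts("free batched pages"); }
static void ptl_unlock(void)       { puts("drop page table lock"); }

static void zap_range_sketch(bool dirty_file_pte, bool batch_full)
{
	bool force_flush  = dirty_file_pte || batch_full;
	bool locked_flush = dirty_file_pte;

	/* Dirty file-backed PTE case: do the TLB flush while the lock is
	 * still held, mirroring the "before dropping ptl" flush in the patch. */
	if (locked_flush)
		tlb_flush();
	ptl_unlock();

	if (force_flush) {
		/* Batch-full case: no dirty PTE forced an early flush, so the
		 * flush happens here, outside the lock, before freeing. */
		if (!locked_flush)
			tlb_flush();
		tlb_free_batched();
		/* both flags are reset here before the "goto again" restart */
	}
}

int main(void)
{
	puts("-- dirty file-backed PTE --");
	zap_range_sketch(true, false);
	puts("-- TLB batch filled up --");
	zap_range_sketch(false, true);
	return 0;
}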