@@ -1601,8 +1601,14 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ bool atomic; /* Do not sleep (zap skips resched checks). */
};
+static inline bool zap_is_atomic(const struct zap_details *details)
+{
+ return unlikely(details) && details->atomic;
+}
+
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1056,7 +1056,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (pte_none(ptent))
continue;
- if (need_resched())
+ if (!zap_is_atomic(details) && need_resched())
break;
if (pte_present(ptent)) {
@@ -1159,7 +1159,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
}
if (addr != end) {
- cond_resched();
+ if (!zap_is_atomic(details))
+ cond_resched();
goto again;
}
@@ -1195,7 +1196,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
goto next;
next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
- cond_resched();
+ if (!zap_is_atomic(details))
+ cond_resched();
} while (pmd++, addr = next, addr != end);
return addr;
@@ -1224,7 +1226,8 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
continue;
next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
- cond_resched();
+ if (!zap_is_atomic(details))
+ cond_resched();
} while (pud++, addr = next, addr != end);
return addr;