@@ -100,6 +100,9 @@ struct mmu_gather {
/* we are in the middle of an operation to clear
* a full mm and can make some optimizations */
unsigned int fullmm : 1,
+ /* we have a single thread, current, which is active in the user
+ * address space */
+ singlethread : 1,
/* we have performed an operation which
* requires a complete flush of the tlb */
need_flush_all : 1;
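
For reference, the new bit caches the predicate that __tlb_remove_page_size()
open-codes further down; a minimal sketch of what it encodes (the helper name
is hypothetical, not part of the patch):

	/* the gather was started by current against its own mm, and no
	 * other thread or process holds a reference to that mm, so
	 * nothing else can be accessing the pages about to be unmapped */
	static inline bool tlb_is_single_threaded(struct mmu_gather *tlb)
	{
		return tlb->mm == current->mm &&
		       atomic_read(&tlb->mm->mm_users) <= 1;
	}
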
@@ -1703,7 +1703,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* operations.
*/
orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
- tlb->fullmm);
+ tlb->singlethread);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_dax(vma)) {
if (arch_needs_pgtable_deposit())
@@ -1971,7 +1971,7 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
* operations.
*/
orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
- tlb->fullmm);
+ tlb->singlethread);
tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_dax(vma)) {
spin_unlock(ptl);
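
Note: on most architectures the "full" argument is accepted and ignored by
the generic fallbacks, so the pmd/pud changes above only alter behaviour
where an arch overrides these helpers (s390, for example). Roughly, from
include/asm-generic/pgtable.h:

	static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
						    unsigned long address,
						    pte_t *ptep, int full)
	{
		return ptep_get_and_clear(mm, address, ptep);
	}

	static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
							 unsigned long address,
							 pmd_t *pmdp, int full)
	{
		return pmdp_huge_get_and_clear(mm, address, pmdp);
	}

pte_clear_not_present_full() follows the same pattern.
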
@@ -350,7 +350,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
continue;
nr_swap--;
free_swap_and_cache(entry);
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ pte_clear_not_present_full(mm, addr, pte, tlb->singlethread);
continue;
}
@@ -417,7 +417,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
* after pte clearing.
*/
ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
+ tlb->singlethread);
ptent = pte_mkold(ptent);
ptent = pte_mkclean(ptent);
@@ -221,8 +221,9 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
{
tlb->mm = mm;
/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));
+ tlb->singlethread = (mm == current->mm) &&
+ (atomic_read(&mm->mm_users) <= 1);
tlb->need_flush_all = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
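
To make the two bits concrete (illustration only, assuming the usual callers
of tlb_gather_mmu() in the tree this patch targets):

	/*
	 * exit_mmap():  tlb_gather_mmu(&tlb, mm, 0, -1);
	 *               -> fullmm = 1; singlethread = 1 when the exiting
	 *                  task is the last user of its own mm
	 * munmap():     tlb_gather_mmu(&tlb, mm, start, end);
	 *               -> fullmm = 0; singlethread = 1 only for a
	 *                  single-threaded task unmapping its own mm
	 */
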
@@ -300,7 +301,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
* When this is our mm and there are no other users, there can not be
* a concurrent memory access.
*/
- if (current->mm == tlb->mm && atomic_read(&tlb->mm->mm_users) < 2) {
+ if (tlb->singlethread) {
free_page_and_swap_cache(page);
return false;
}
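
For contrast, the non-singlethread path still batches the page and defers
the free to tlb_finish_mmu(); abbreviated sketch of how the function reads
after the patch:

	if (tlb->singlethread) {
		free_page_and_swap_cache(page);	/* free immediately */
		return false;			/* no flush forced yet */
	}
	batch = tlb->active;
	batch->pages[batch->nr++] = page;	/* freed from tlb_finish_mmu() */
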
@@ -1315,7 +1316,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
ptent = ptep_get_and_clear_full(mm, addr, pte,
- tlb->fullmm);
+ tlb->singlethread);
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
@@ -1330,7 +1331,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
* the old TLB after it was marked
* clean.
*/
- if (!tlb->fullmm) {
+ if (!tlb->singlethread) {
force_flush = 1;
locked_flush = 1;
}
@@ -1367,7 +1368,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ pte_clear_not_present_full(mm, addr, pte, tlb->singlethread);
rss[mm_counter(page)]--;
page_remove_rmap(page, false);
put_page(page);
@@ -1389,7 +1390,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
}
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
- pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+ pte_clear_not_present_full(mm, addr, pte, tlb->singlethread);
} while (pte++, addr += PAGE_SIZE, addr != end);
add_mm_rss_vec(mm, rss);
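
For context, the bit is computed once per gather and then consumed by every
*_full() call in the zap path; the overall caller sequence is unchanged
(sketch, with the signatures as they appear in the tree this patch is
written against):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* sets fullmm/singlethread */
	unmap_vmas(&tlb, vma, start, end);	/* -> zap_pte_range() etc. */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, free batches */
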