@@ -435,6 +435,9 @@ void unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details);
+void notify_unmap_single_vma(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long addr,
+ unsigned long size, struct zap_details *details);
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
gfp_t gfp);
@@ -851,7 +851,8 @@ static int madvise_free_single_vma(
* An interface that causes the system to free clean pages and flush
* dirty pages is already available as msync(MS_INVALIDATE).
*/
-static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+static long madvise_dontneed_single_vma(struct madvise_behavior *behavior,
+ struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct zap_details details = {
@@ -859,7 +860,7 @@ static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
.even_cows = true,
};
- zap_page_range_single(vma, start, end - start, &details);
+ notify_unmap_single_vma(behavior->tlb, vma, start, end - start, &details);
return 0;
}
@@ -949,7 +950,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
}
if (action == MADV_DONTNEED || action == MADV_DONTNEED_LOCKED)
- return madvise_dontneed_single_vma(vma, start, end);
+ return madvise_dontneed_single_vma(behavior, vma, start, end);
else if (action == MADV_FREE)
return madvise_free_single_vma(behavior, vma, start, end);
else
@@ -1627,6 +1628,8 @@ static void madvise_unlock(struct mm_struct *mm, int behavior)
static bool madvise_batch_tlb_flush(int behavior)
{
switch (behavior) {
+ case MADV_DONTNEED:
+ case MADV_DONTNEED_LOCKED:
case MADV_FREE:
return true;
default:
@@ -1989,7 +1989,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
mmu_notifier_invalidate_range_end(&range);
}
-/*
+/**
* notify_unmap_single_vma - remove user pages in a given range
* @tlb: pointer to the caller's struct mmu_gather
* @vma: vm_area_struct holding the applicable pages
@@ -2000,7 +2000,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
* @tlb shouldn't be NULL. The range must fit into one VMA. If @vma is for
* hugetlb, @tlb is flushed and re-initialized by this function.
*/
-static void notify_unmap_single_vma(struct mmu_gather *tlb,
+void notify_unmap_single_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *details)
{
Batch tlb flushes for MADV_DONTNEED[_LOCKED] for better efficiency, in a way that is very similar to the tlb flush batching for MADV_FREE. Signed-off-by: SeongJae Park <sj@kernel.org> --- mm/internal.h | 3 +++ mm/madvise.c | 9 ++++++--- mm/memory.c | 4 ++-- 3 files changed, 11 insertions(+), 5 deletions(-)