diff mbox series

[RESEND,RFC,1/5] mm: add atomic capability to zap_details

Message ID 20200904113116.20648-2-alazar@bitdefender.com (mailing list archive)
State New, archived
Headers show
Series Remote mapping | expand

Commit Message

Adalbert Lazăr Sept. 4, 2020, 11:31 a.m. UTC
From: Mircea Cirjaliu <mcirjaliu@bitdefender.com>

Add an 'atomic' flag to struct zap_details that forces the zap_*_range()
functions to loop without rescheduling. This is useful for unmapping memory
from an atomic context, although no checks for atomic context are made:
the caller is responsible for ensuring that skipping rescheduling is safe.

Signed-off-by: Mircea Cirjaliu <mcirjaliu@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 include/linux/mm.h |  6 ++++++
 mm/memory.c        | 11 +++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5a323422d783..1be4482a7b81 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1601,8 +1601,14 @@  struct zap_details {
 	struct address_space *check_mapping;	/* Check page->mapping if set */
 	pgoff_t	first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
+	bool atomic;				/* Do not sleep. */
 };
 
+/* True when the caller requested a non-sleeping (atomic) zap. */
+static inline bool zap_is_atomic(const struct zap_details *details)
+{
+	return unlikely(details) && details->atomic;
+}
+
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index f703fe8c8346..8e78fb151f8f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1056,7 +1056,7 @@  static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		if (pte_none(ptent))
 			continue;
 
-		if (need_resched())
+		if (!zap_is_atomic(details) && need_resched())
 			break;
 
 		if (pte_present(ptent)) {
@@ -1159,7 +1159,8 @@  static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	}
 
 	if (addr != end) {
-		cond_resched();
+		if (!zap_is_atomic(details))
+			cond_resched();
 		goto again;
 	}
 
@@ -1195,7 +1196,8 @@  static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 			goto next;
 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
 next:
-		cond_resched();
+		if (!zap_is_atomic(details))
+			cond_resched();
 	} while (pmd++, addr = next, addr != end);
 
 	return addr;
@@ -1224,7 +1226,8 @@  static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 			continue;
 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
 next:
-		cond_resched();
+		if (!zap_is_atomic(details))
+			cond_resched();
 	} while (pud++, addr = next, addr != end);
 
 	return addr;