
[RFC,09/24] do_swap_page: use the vmf->range field when dropping mmap_sem

Message ID 20200224203057.162467-10-walken@google.com
State New, archived
Series Fine grained MM locking

Commit Message

Michel Lespinasse Feb. 24, 2020, 8:30 p.m. UTC
Change do_swap_page() and lock_page_or_retry() so that the proper lock range,
rather than the entire mmap_sem, is released when dropping the lock during swap-in.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 include/linux/pagemap.h | 7 ++++---
 mm/filemap.c            | 6 +++---
 mm/memory.c             | 2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)
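
As a reading aid (not part of the patch), here is a minimal sketch of how a fault
path is expected to use the new range argument. struct mm_lock_range, vmf->range,
and mm_read_range_unlock() are introduced by earlier patches in this series; the
surrounding fault-handling details shown here are assumptions for illustration only.

static vm_fault_t example_swap_fault(struct vm_fault *vmf, struct page *page)
{
	struct vm_area_struct *vma = vmf->vma;
	int locked;

	/* vmf->range describes the read-locked range covering this fault. */
	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags, vmf->range);
	if (!locked) {
		/*
		 * __lock_page_or_retry() released vmf->range (not the whole
		 * mmap_sem) before waiting on the page, so the caller must
		 * retry the fault.
		 */
		return VM_FAULT_RETRY;
	}

	/* Page is locked and the read range is still held. */
	unlock_page(page);
	return 0;
}

The effect of the change is visible in __lock_page_or_retry() below: when
FAULT_FLAG_ALLOW_RETRY is set and the handler must sleep waiting for the page,
only the caller's range is dropped via mm_read_range_unlock(), so faults on
non-overlapping ranges can proceed concurrently.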

Patch

diff --git include/linux/pagemap.h include/linux/pagemap.h
index 37a4d9e32cd3..93520477c481 100644
--- include/linux/pagemap.h
+++ include/linux/pagemap.h
@@ -458,7 +458,7 @@  static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
-				unsigned int flags);
+		unsigned int flags, struct mm_lock_range *range);
 extern void unlock_page(struct page *page);
 
 /*
@@ -501,10 +501,11 @@  static inline int lock_page_killable(struct page *page)
  * __lock_page_or_retry().
  */
 static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
-				     unsigned int flags)
+		unsigned int flags, struct mm_lock_range *range)
 {
 	might_sleep();
-	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+	return trylock_page(page) || __lock_page_or_retry(page, mm, flags,
+							  range);
 }
 
 /*
diff --git mm/filemap.c mm/filemap.c
index eb6487065ca0..3afb5a3f0b9c 100644
--- mm/filemap.c
+++ mm/filemap.c
@@ -1406,7 +1406,7 @@  EXPORT_SYMBOL_GPL(__lock_page_killable);
  * with the page locked and the mmap_sem unperturbed.
  */
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
-			 unsigned int flags)
+			 unsigned int flags, struct mm_lock_range *range)
 {
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 		/*
@@ -1416,7 +1416,7 @@  int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 		if (flags & FAULT_FLAG_RETRY_NOWAIT)
 			return 0;
 
-		mm_read_unlock(mm);
+		mm_read_range_unlock(mm, range);
 		if (flags & FAULT_FLAG_KILLABLE)
 			wait_on_page_locked_killable(page);
 		else
@@ -1428,7 +1428,7 @@  int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 
 			ret = __lock_page_killable(page);
 			if (ret) {
-				mm_read_unlock(mm);
+				mm_read_range_unlock(mm, range);
 				return 0;
 			}
 		} else
diff --git mm/memory.c mm/memory.c
index bc24a6bdaa06..3da4ae504957 100644
--- mm/memory.c
+++ mm/memory.c
@@ -2964,7 +2964,7 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out_release;
 	}
 
-	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags, vmf->range);
 
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	if (!locked) {