[v14,019/138] mm/filemap: Add folio_lock_killable()

Message ID: 20210715033704.692967-20-willy@infradead.org
State: New, archived
Series: Memory folios

Commit Message

Matthew Wilcox (Oracle) July 15, 2021, 3:35 a.m. UTC
This is like lock_page_killable() but for use by callers who
know they have a folio.  Convert __lock_page_killable() to be
__folio_lock_killable().  This saves one call to compound_head() per
contended call to lock_page_killable().

__folio_lock_killable() is 19 bytes smaller than __lock_page_killable()
was.  filemap_fault() shrinks by 74 bytes and __lock_page_or_retry()
shrinks by 71 bytes.  That's a total of 164 bytes of text saved.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
---
 include/linux/pagemap.h | 15 ++++++++++-----
 mm/filemap.c            | 17 +++++++++--------
 2 files changed, 19 insertions(+), 13 deletions(-)
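
For illustration only, not part of the patch: after this change a caller that already has a folio takes the killable lock directly, while lock_page_killable() performs the page_folio() lookup once in its inline wrapper instead of repeating compound_head() in the contended slow path. A minimal hypothetical sketch of the folio-native calling pattern (the function name is invented; assumes <linux/pagemap.h>):

	/* Hypothetical caller, not from this series. */
	static int example_with_folio(struct folio *folio)
	{
		int err;

		/*
		 * The uncontended fast path is the inline folio_trylock();
		 * the contended path calls __folio_lock_killable(), which
		 * this patch adds.
		 */
		err = folio_lock_killable(folio);
		if (err)
			return err;	/* -EINTR: a fatal signal arrived */

		/* ... operate on the locked folio ... */
		folio_unlock(folio);
		return 0;
	}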

Comments

Mike Rapoport July 20, 2021, 10:44 a.m. UTC | #1
On Thu, Jul 15, 2021 at 04:35:05AM +0100, Matthew Wilcox (Oracle) wrote:
> This is like lock_page_killable() but for use by callers who
> know they have a folio.  Convert __lock_page_killable() to be
> __folio_lock_killable().  This saves one call to compound_head() per
> contended call to lock_page_killable().
> 
> __folio_lock_killable() is 19 bytes smaller than __lock_page_killable()
> was.  filemap_fault() shrinks by 74 bytes and __lock_page_or_retry()
> shrinks by 71 bytes.  That's a total of 164 bytes of text saved.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Acked-by: Jeff Layton <jlayton@kernel.org>
> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> Reviewed-by: William Kucharski <william.kucharski@oracle.com>
> ---
>  include/linux/pagemap.h | 15 ++++++++++-----
>  mm/filemap.c            | 17 +++++++++--------
>  2 files changed, 19 insertions(+), 13 deletions(-)

Acked-by: Mike Rapoport <rppt@linux.ibm.com>

David Howells Aug. 10, 2021, 3:32 p.m. UTC | #2
Matthew Wilcox (Oracle) <willy@infradead.org> wrote:

> This is like lock_page_killable() but for use by callers who
> know they have a folio.  Convert __lock_page_killable() to be
> __folio_lock_killable().  This saves one call to compound_head() per
> contended call to lock_page_killable().
> 
> __folio_lock_killable() is 19 bytes smaller than __lock_page_killable()
> was.  filemap_fault() shrinks by 74 bytes and __lock_page_or_retry()
> shrinks by 71 bytes.  That's a total of 164 bytes of text saved.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Acked-by: Jeff Layton <jlayton@kernel.org>
> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> Reviewed-by: William Kucharski <william.kucharski@oracle.com>

Reviewed-by: David Howells <dhowells@redhat.com>

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c3673c55125b..88727c74e059 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -654,7 +654,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 }
 
 void __folio_lock(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+int __folio_lock_killable(struct folio *folio);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
@@ -694,6 +694,14 @@ static inline void lock_page(struct page *page)
 		__folio_lock(folio);
 }
 
+static inline int folio_lock_killable(struct folio *folio)
+{
+	might_sleep();
+	if (!folio_trylock(folio))
+		return __folio_lock_killable(folio);
+	return 0;
+}
+
 /*
  * lock_page_killable is like lock_page but can be interrupted by fatal
  * signals.  It returns 0 if it locked the page and -EINTR if it was
@@ -701,10 +709,7 @@ static inline void lock_page(struct page *page)
  */
 static inline int lock_page_killable(struct page *page)
 {
-	might_sleep();
-	if (!trylock_page(page))
-		return __lock_page_killable(page);
-	return 0;
+	return folio_lock_killable(page_folio(page));
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 95f89656f126..962db5c38cd7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1589,14 +1589,13 @@ void __folio_lock(struct folio *folio)
 }
 EXPORT_SYMBOL(__folio_lock);
 
-int __lock_page_killable(struct page *__page)
+int __folio_lock_killable(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
 					EXCLUSIVE);
 }
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__folio_lock_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
@@ -1638,6 +1637,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
+	struct folio *folio = page_folio(page);
+
 	if (fault_flag_allow_retry_first(flags)) {
 		/*
 		 * CAUTION! In this case, mmap_lock is not released
@@ -1656,13 +1657,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 	if (flags & FAULT_FLAG_KILLABLE) {
 		int ret;
 
-		ret = __lock_page_killable(page);
+		ret = __folio_lock_killable(folio);
 		if (ret) {
 			mmap_read_unlock(mm);
 			return 0;
 		}
 	} else {
-		__folio_lock(page_folio(page));
+		__folio_lock(folio);
 	}
 
 	return 1;
@@ -2851,7 +2852,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(&folio->page)) {
+		if (__folio_lock_killable(folio)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
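
For context on the __lock_page_or_retry() hunk above, a hedged sketch of how a fault path typically consumes its return value; the surrounding function is hypothetical and not taken from this series. A return of 1 means the page is now locked; 0 means the lock was not taken and, except in the FAULT_FLAG_RETRY_NOWAIT case noted in the CAUTION comment, mmap_lock was released, so the fault must be retried:

	/* Hypothetical fault-path caller, not from this series. */
	static vm_fault_t example_fault_lock(struct page *page,
					     struct vm_fault *vmf)
	{
		if (!__lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags))
			return VM_FAULT_RETRY;	/* lock not taken; retry fault */

		/* The head page of the folio is locked here. */
		return 0;
	}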