
[08/23] ocfs2: Convert ocfs2_inode_lock_with_page() to ocfs2_inode_lock_with_folio()

Message ID 20241205171653.3179945-9-willy@infradead.org (mailing list archive)
State New
Headers show
Series Convert ocfs2 to use folios | expand

Commit Message

Matthew Wilcox (Oracle) Dec. 5, 2024, 5:16 p.m. UTC
From: Mark Tinguely <mark.tinguely@oracle.com>

Saves a hidden call to compound_head().

Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/ocfs2/aops.c    |  2 +-
 fs/ocfs2/dlmglue.c | 18 ++++++++----------
 fs/ocfs2/dlmglue.h |  6 ++----
 3 files changed, 11 insertions(+), 15 deletions(-)
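
For context, the "hidden call to compound_head()" is the head-page lookup that
the page-based helpers perform internally. A minimal illustrative sketch,
simplified from the include/linux/pagemap.h wrapper rather than quoted
verbatim:

/* Sketch only: simplified from include/linux/pagemap.h, not verbatim. */
static inline void unlock_page(struct page *page)
{
	/* page_folio() resolves the head page via compound_head() */
	folio_unlock(page_folio(page));
}

So the old ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page) paid for
that lookup on its -EAGAIN path even though the caller already had the folio
in hand; the converted ocfs2_inode_lock_with_folio() calls folio_unlock(folio)
directly and skips it.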

Comments

Joseph Qi Dec. 14, 2024, 2:10 p.m. UTC | #1
On 2024/12/6 01:16, Matthew Wilcox (Oracle) wrote:
> From: Mark Tinguely <mark.tinguely@oracle.com>
> 
> Saves a hidden call to compound_head().
> 
> Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Looks good.
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>


Patch

diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1896a1d24173..161f2dc00b00 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -283,7 +283,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
 
 	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
 
-	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
+	ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
 	if (ret != 0) {
 		if (ret == AOP_TRUNCATED_PAGE)
 			unlock = 0;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 60df52e4c1f8..acebfa19ea6f 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2529,30 +2529,28 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
 
 /*
  * This is working around a lock inversion between tasks acquiring DLM
- * locks while holding a page lock and the downconvert thread which
- * blocks dlm lock acquiry while acquiring page locks.
+ * locks while holding a folio lock and the downconvert thread which
+ * blocks dlm lock acquiry while acquiring folio locks.
  *
- * ** These _with_page variantes are only intended to be called from aop
- * methods that hold page locks and return a very specific *positive* error
+ * ** These _with_folio variantes are only intended to be called from aop
+ * methods that hold folio locks and return a very specific *positive* error
  * code that aop methods pass up to the VFS -- test for errors with != 0. **
  *
  * The DLM is called such that it returns -EAGAIN if it would have
  * blocked waiting for the downconvert thread.  In that case we unlock
- * our page so the downconvert thread can make progress.  Once we've
+ * our folio so the downconvert thread can make progress.  Once we've
  * done this we have to return AOP_TRUNCATED_PAGE so the aop method
  * that called us can bubble that back up into the VFS who will then
  * immediately retry the aop call.
  */
-int ocfs2_inode_lock_with_page(struct inode *inode,
-			      struct buffer_head **ret_bh,
-			      int ex,
-			      struct page *page)
+int ocfs2_inode_lock_with_folio(struct inode *inode,
+		struct buffer_head **ret_bh, int ex, struct folio *folio)
 {
 	int ret;
 
 	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
 	if (ret == -EAGAIN) {
-		unlock_page(page);
+		folio_unlock(folio);
 		/*
 		 * If we can't get inode lock immediately, we should not return
 		 * directly here, since this will lead to a softlockup problem.
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index e5da5809ed95..a3ebd7303ea2 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
 			 int ex,
 			 int arg_flags,
 			 int subclass);
-int ocfs2_inode_lock_with_page(struct inode *inode,
-			      struct buffer_head **ret_bh,
-			      int ex,
-			      struct page *page);
+int ocfs2_inode_lock_with_folio(struct inode *inode,
+		struct buffer_head **ret_bh, int ex, struct folio *folio);
 /* Variants without special locking class or flags */
 #define ocfs2_inode_lock_full(i, r, e, f)\
 		ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
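
The dlmglue.c comment above describes the retry protocol from the callee's
side. A rough caller-side sketch, loosely modelled on the ocfs2_read_folio()
hunk in this patch (names and error handling simplified, not the verbatim
fs/ocfs2/aops.c code), shows how an aop method is expected to consume the
positive return value:

/*
 * Caller-side sketch of the contract described in the dlmglue.c comment;
 * loosely modelled on ocfs2_read_folio(), simplified and hypothetical.
 */
static int example_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	int ret;

	ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
	if (ret == AOP_TRUNCATED_PAGE) {
		/*
		 * The helper already unlocked the folio so the downconvert
		 * thread can make progress; return the positive code and
		 * the VFS retries ->read_folio().
		 */
		return ret;
	}
	if (ret) {
		/* Real error: the folio is still locked, unlock and fail. */
		folio_unlock(folio);
		return ret;
	}

	/* ... do the read with the cluster lock held ... */

	ocfs2_inode_unlock(inode, 0);
	folio_unlock(folio);
	return 0;
}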