
[2/2] jffs2: Use a folio in jffs2_garbage_collect_dnode()

Message ID: 20240814195915.249871-3-willy@infradead.org (mailing list archive)
State: New
Series: Finish converting jffs2 to folios

Commit Message

Matthew Wilcox Aug. 14, 2024, 7:59 p.m. UTC
Call read_cache_folio() instead of read_cache_page() to get the folio
containing the page.  No attempt is made here to support large folios,
as I assume that will never be interesting for jffs2.  It also includes
a switch from kmap to kmap_local, which looks safe.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/jffs2/gc.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)
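
For context, the conversion follows the usual page-to-folio pattern. The sketch below is an editorial illustration rather than part of the patch; demo_use_cached_folio() and demo_read_folio are placeholder names (the filler actually passed in gc.c is __jffs2_read_folio):

#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * Editorial sketch only: demo_read_folio stands in for a filesystem's
 * read_folio implementation (__jffs2_read_folio in this patch).
 */
static int demo_use_cached_folio(struct address_space *mapping, loff_t pos,
				 filler_t *demo_read_folio)
{
	struct folio *folio;
	void *kaddr;

	/* Replaces read_cache_page(): read (if necessary) and return the
	 * folio covering @pos, with a reference held. */
	folio = read_cache_folio(mapping, pos >> PAGE_SHIFT,
				 demo_read_folio, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* Replaces kmap(): map the folio from byte offset 0.  jffs2 only
	 * ever sees order-0 folios, so this covers the whole folio. */
	kaddr = kmap_local_folio(folio, 0);

	/* ... read through kaddr ... */

	/* Replaces the kunmap()/put_page() pair: drop the temporary mapping
	 * and the reference taken by read_cache_folio(). */
	folio_release_kmap(folio, kaddr);
	return 0;
}

Unlike kmap_atomic(), kmap_local mappings are per-thread and preserved across context switches, so they may be held over sleeping calls such as mutex_lock(&f->sem); that is presumably why the commit message describes the switch as looking safe.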

Comments

Zhihao Cheng Aug. 16, 2024, 1:10 p.m. UTC | #1
On 2024/8/15 3:59, Matthew Wilcox (Oracle) wrote:
> Call read_cache_folio() instead of read_cache_page() to get the folio
> containing the page.  No attempt is made here to support large folios,
> as I assume that will never be interesting for jffs2.  It also includes
> a switch from kmap to kmap_local, which looks safe.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   fs/jffs2/gc.c | 25 ++++++++++++-------------
>   1 file changed, 12 insertions(+), 13 deletions(-)

Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>

Patch

diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 5c6602f3c189..822949d0eb00 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -1171,7 +1171,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 	uint32_t alloclen, offset, orig_end, orig_start;
 	int ret = 0;
 	unsigned char *comprbuf = NULL, *writebuf;
-	struct page *page;
+	struct folio *folio;
 	unsigned char *pg_ptr;
 
 	memset(&ri, 0, sizeof(ri));
@@ -1317,25 +1317,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		BUG_ON(start > orig_start);
 	}
 
-	/* The rules state that we must obtain the page lock *before* f->sem, so
+	/* The rules state that we must obtain the folio lock *before* f->sem, so
 	 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
 	 * actually going to *change* so we're safe; we only allow reading.
 	 *
 	 * It is important to note that jffs2_write_begin() will ensure that its
-	 * page is marked Uptodate before allocating space. That means that if we
-	 * end up here trying to GC the *same* page that jffs2_write_begin() is
-	 * trying to write out, read_cache_page() will not deadlock. */
+	 * folio is marked uptodate before allocating space. That means that if we
+	 * end up here trying to GC the *same* folio that jffs2_write_begin() is
+	 * trying to write out, read_cache_folio() will not deadlock. */
 	mutex_unlock(&f->sem);
-	page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
+	folio = read_cache_folio(inode->i_mapping, start >> PAGE_SHIFT,
 			       __jffs2_read_folio, NULL);
-	if (IS_ERR(page)) {
-		pr_warn("read_cache_page() returned error: %ld\n",
-			PTR_ERR(page));
+	if (IS_ERR(folio)) {
+		pr_warn("read_cache_folio() returned error: %ld\n",
+			PTR_ERR(folio));
 		mutex_lock(&f->sem);
-		return PTR_ERR(page);
+		return PTR_ERR(folio);
 	}
 
-	pg_ptr = kmap(page);
+	pg_ptr = kmap_local_folio(folio, 0);
 	mutex_lock(&f->sem);
 
 	offset = start;
@@ -1400,7 +1400,6 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
 		}
 	}
 
-	kunmap(page);
-	put_page(page);
+	folio_release_kmap(folio, pg_ptr);
 	return ret;
 }
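
The last hunk collapses the old kunmap()/put_page() pair into a single folio_release_kmap() call. For reference (this paraphrases include/linux/highmem.h and is not part of the patch), that helper amounts to:

/*
 * Roughly what folio_release_kmap() does: undo kmap_local_folio() and
 * drop the folio reference that read_cache_folio() handed back.
 */
static inline void folio_release_kmap(struct folio *folio, void *addr)
{
	kunmap_local(addr);
	folio_put(folio);
}

Keeping the unmap and the reference drop in one call makes the pairing harder to get wrong than the previous two separate calls.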