diff mbox series

shmem: Convert shmem_write_end() to use a folio

Message ID 20230112131031.1209553-1-willy@infradead.org (mailing list archive)
State New
Headers show
Series shmem: Convert shmem_write_end() to use a folio | expand

Commit Message

Matthew Wilcox (Oracle) Jan. 12, 2023, 1:10 p.m. UTC
Use a folio internally to shmem_write_end(), which saves a number of
calls to compound_head(), lets us get rid of the custom code to
zero out the rest of a THP, and supports folios of arbitrary size.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/shmem.c | 30 ++++++++++--------------------
 1 file changed, 10 insertions(+), 20 deletions(-)

Comments

William Kucharski Jan. 12, 2023, 5:40 p.m. UTC | #1
Looks good to me.

Reviewed-by: William Kucharski <william.kucharski@oracle.com>

> On Jan 12, 2023, at 6:10 AM, Matthew Wilcox (Oracle) <willy@infradead.org> wrote:
> 
> Use a folio internally to shmem_write_end() which saves a number of
> calls to compound_head() and lets us get rid of the custom code to
> zero out the rest of a THP and supports folios of arbitrary size.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> mm/shmem.c | 30 ++++++++++--------------------
> 1 file changed, 10 insertions(+), 20 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index bc5c156ef470..c5048c6c83dd 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -2578,33 +2578,23 @@ shmem_write_end(struct file *file, struct address_space *mapping,
> loff_t pos, unsigned len, unsigned copied,
> struct page *page, void *fsdata)
> {
> + struct folio *folio = page_folio(page);
> struct inode *inode = mapping->host;
> 
> if (pos + copied > inode->i_size)
> i_size_write(inode, pos + copied);
> 
> - if (!PageUptodate(page)) {
> - struct page *head = compound_head(page);
> - if (PageTransCompound(page)) {
> - int i;
> -
> - for (i = 0; i < HPAGE_PMD_NR; i++) {
> - if (head + i == page)
> - continue;
> - clear_highpage(head + i);
> - flush_dcache_page(head + i);
> - }
> - }
> - if (copied < PAGE_SIZE) {
> - unsigned from = pos & (PAGE_SIZE - 1);
> - zero_user_segments(page, 0, from,
> - from + copied, PAGE_SIZE);
> + if (!folio_test_uptodate(folio)) {
> + if (copied < folio_size(folio)) {
> + size_t from = offset_in_folio(folio, pos);
> + folio_zero_segments(folio, 0, from,
> + from + copied, folio_size(folio));
> }
> - SetPageUptodate(head);
> + folio_mark_uptodate(folio);
> }
> - set_page_dirty(page);
> - unlock_page(page);
> - put_page(page);
> + folio_mark_dirty(folio);
> + folio_unlock(folio);
> + folio_put(folio);
> 
> return copied;
> }
> -- 
> 2.35.1
> 
>
diff mbox series

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index bc5c156ef470..c5048c6c83dd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2578,33 +2578,23 @@  shmem_write_end(struct file *file, struct address_space *mapping,
 			loff_t pos, unsigned len, unsigned copied,
 			struct page *page, void *fsdata)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = mapping->host;
 
 	if (pos + copied > inode->i_size)
 		i_size_write(inode, pos + copied);
 
-	if (!PageUptodate(page)) {
-		struct page *head = compound_head(page);
-		if (PageTransCompound(page)) {
-			int i;
-
-			for (i = 0; i < HPAGE_PMD_NR; i++) {
-				if (head + i == page)
-					continue;
-				clear_highpage(head + i);
-				flush_dcache_page(head + i);
-			}
-		}
-		if (copied < PAGE_SIZE) {
-			unsigned from = pos & (PAGE_SIZE - 1);
-			zero_user_segments(page, 0, from,
-					from + copied, PAGE_SIZE);
+	if (!folio_test_uptodate(folio)) {
+		if (copied < folio_size(folio)) {
+			size_t from = offset_in_folio(folio, pos);
+			folio_zero_segments(folio, 0, from,
+					from + copied, folio_size(folio));
 		}
-		SetPageUptodate(head);
+		folio_mark_uptodate(folio);
 	}
-	set_page_dirty(page);
-	unlock_page(page);
-	put_page(page);
+	folio_mark_dirty(folio);
+	folio_unlock(folio);
+	folio_put(folio);
 
 	return copied;
 }