[v3,04/12] fuse: support large folios for writethrough writes

Message ID: 20241213221818.322371-5-joannelkoong@gmail.com
State: New
Series: fuse: support large folios

Commit Message

Joanne Koong Dec. 13, 2024, 10:18 p.m. UTC
Add support for folios larger than one page for writethrough
writes.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
---
 fs/fuse/file.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
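
For readers skimming the hunks below, the heart of the change is the new
folio_offset arithmetic. A worked example with made-up values (illustrative
only, not taken from the patch; assumes PAGE_SIZE == 4096, PAGE_SHIFT == 12,
and the variable names used in fuse_fill_write_pages()):

	/* Suppose a write at pos == 0x5200 lands in a 16 KiB (order-2)
	 * folio whose first page has index 4 in the page cache. */
	pgoff_t index = pos >> PAGE_SHIFT;        /* 0x5200 >> 12 == 5 */
	unsigned offset = pos & (PAGE_SIZE - 1);  /* == 0x200 (512) */

	/* byte offset of the write within the folio, as the patch computes: */
	unsigned folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
	/* == ((5 - 4) << 12) + 0x200 == 0x1200 (4608) */

	bytes = min(folio_size(folio) - folio_offset, num);
	/* up to 16384 - 4608 == 11776 bytes can be copied in one pass,
	 * versus at most PAGE_SIZE - offset == 3584 before the patch */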

Comments

Jeff Layton Dec. 19, 2024, 6:08 p.m. UTC | #1
On Fri, 2024-12-13 at 14:18 -0800, Joanne Koong wrote:
> Add support for folios larger than one page for writethrough
> writes.
> 
> Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
> ---
>  fs/fuse/file.c | 19 ++++++++++++-------
>  1 file changed, 12 insertions(+), 7 deletions(-)
> 
> diff --git a/fs/fuse/file.c b/fs/fuse/file.c
> index c041bb328203..84e39426862a 100644
> --- a/fs/fuse/file.c
> +++ b/fs/fuse/file.c
> @@ -1135,6 +1135,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
>  				     struct iov_iter *ii, loff_t pos,
>  				     unsigned int max_pages)
>  {
> +	size_t max_folio_size = mapping_max_folio_size(mapping);
>  	struct fuse_args_pages *ap = &ia->ap;
>  	struct fuse_conn *fc = get_fuse_conn(mapping->host);
>  	unsigned offset = pos & (PAGE_SIZE - 1);
> @@ -1146,17 +1147,17 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
>  	num = min(num, max_pages << PAGE_SHIFT);
>  
>  	ap->args.in_pages = true;
> -	ap->descs[0].offset = offset;
>  
>  	while (num) {
>  		size_t tmp;
>  		struct folio *folio;
>  		pgoff_t index = pos >> PAGE_SHIFT;
> -		unsigned int bytes = min(PAGE_SIZE - offset, num);
> +		unsigned int bytes;
> +		unsigned int folio_offset;
>  
>   again:
>  		err = -EFAULT;
> -		if (fault_in_iov_iter_readable(ii, bytes))
> +		if (fault_in_iov_iter_readable(ii, max_folio_size) == max_folio_size)
>  			break;
>  
>  		folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
> @@ -1169,7 +1170,10 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
>  		if (mapping_writably_mapped(mapping))
>  			flush_dcache_folio(folio);
>  
> -		tmp = copy_folio_from_iter_atomic(folio, offset, bytes, ii);
> +		folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
> +		bytes = min(folio_size(folio) - folio_offset, num);
> +
> +		tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);

Just to save someone else going down the same rabbit hole:

copy_folio_from_iter_atomic() is defined as:

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
                size_t offset, size_t bytes, struct iov_iter *i)
{
        return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}

...which _looks_ sort of like it's not fully baked yet and can't handle
a large folio, but it turns out that copy_page_from_iter_atomic() can
handle compound pages, so I think this is actually OK. Whew!
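
The reason the page-based helper copes is that copy_page_from_iter_atomic()
steps through the compound page internally when it can only map one subpage
at a time. A simplified from-memory sketch of the shape of its copy loop
(the upstream source may differ in detail):

	size_t copied = 0, n;

	do {
		char *p;

		n = bytes - copied;
		if (PageHighMem(page)) {
			/* only one subpage is mappable at a time: step to
			 * the subpage holding 'offset' and clamp the copy */
			page += offset / PAGE_SIZE;
			offset %= PAGE_SIZE;
			n = min_t(size_t, n, PAGE_SIZE - offset);
		}
		p = kmap_atomic(page) + offset;
		n = __copy_from_iter(p, n, i);
		kunmap_atomic(p);
		copied += n;
		offset += n;
	} while (PageHighMem(page) && copied != bytes && n > 0);

	/* with !CONFIG_HIGHMEM the whole compound page is directly
	 * addressable, so the body runs once for the full 'bytes' */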


>  		flush_dcache_folio(folio);
>  
>  		if (!tmp) {
> @@ -1180,6 +1184,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
>  
>  		err = 0;
>  		ap->folios[ap->num_folios] = folio;
> +		ap->descs[ap->num_folios].offset = folio_offset;
>  		ap->descs[ap->num_folios].length = tmp;
>  		ap->num_folios++;
>  
> @@ -1187,11 +1192,11 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
>  		pos += tmp;
>  		num -= tmp;
>  		offset += tmp;
> -		if (offset == PAGE_SIZE)
> +		if (offset == folio_size(folio))
>  			offset = 0;
>  
> -		/* If we copied full page, mark it uptodate */
> -		if (tmp == PAGE_SIZE)
> +		/* If we copied full folio, mark it uptodate */
> +		if (tmp == folio_size(folio))
>  			folio_mark_uptodate(folio);
>  
>  		if (folio_test_uptodate(folio)) {
Matthew Wilcox (Oracle) Dec. 19, 2024, 8:24 p.m. UTC | #2
On Thu, Dec 19, 2024 at 01:08:15PM -0500, Jeff Layton wrote:
> > +		tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
> 
> Just to save someone else going down the same rabbit hole:
> 
> copy_folio_from_iter_atomic() is defined as:
> 
> static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
>                 size_t offset, size_t bytes, struct iov_iter *i)
> {
>         return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
> }
> 
> ...which _looks_ sort of like it's not fully baked yet and can't handle
> a large folio, but it turns out that copy_page_from_iter_atomic() can
> handle compound pages, so I think this is actually OK. Whew!

Yes, this is fine.  I'd love to clean this up further.  If someone wants
the kudos of doing that, ntfs_compress_write() is your challenge.  It's
the only remaining caller of copy_page_from_iter_atomic() and
once it's converted to use folios, we can push folios deeper into
the iov_iter code.
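
As a purely hypothetical sketch of that end state (not code from this series
or from upstream), a folio-native helper could use kmap_local_folio() and
drop the &folio->page round-trip entirely:

	size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
					   size_t bytes, struct iov_iter *i)
	{
		size_t copied = 0, n;

		do {
			/* kmap_local_folio() maps the single page holding
			 * 'offset', so copy at most to that page boundary */
			char *p = kmap_local_folio(folio, offset);

			n = min(bytes - copied, PAGE_SIZE - offset_in_page(offset));
			n = __copy_from_iter(p, n, i);
			kunmap_local(p);
			copied += n;
			offset += n;
		} while (copied != bytes && n > 0);

		return copied;
	}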

Patch

diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c041bb328203..84e39426862a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1135,6 +1135,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 				     struct iov_iter *ii, loff_t pos,
 				     unsigned int max_pages)
 {
+	size_t max_folio_size = mapping_max_folio_size(mapping);
 	struct fuse_args_pages *ap = &ia->ap;
 	struct fuse_conn *fc = get_fuse_conn(mapping->host);
 	unsigned offset = pos & (PAGE_SIZE - 1);
@@ -1146,17 +1147,17 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 	num = min(num, max_pages << PAGE_SHIFT);
 
 	ap->args.in_pages = true;
-	ap->descs[0].offset = offset;
 
 	while (num) {
 		size_t tmp;
 		struct folio *folio;
 		pgoff_t index = pos >> PAGE_SHIFT;
-		unsigned int bytes = min(PAGE_SIZE - offset, num);
+		unsigned int bytes;
+		unsigned int folio_offset;
 
  again:
 		err = -EFAULT;
-		if (fault_in_iov_iter_readable(ii, bytes))
+		if (fault_in_iov_iter_readable(ii, max_folio_size) == max_folio_size)
 			break;
 
 		folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
@@ -1169,7 +1170,10 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_folio(folio);
 
-		tmp = copy_folio_from_iter_atomic(folio, offset, bytes, ii);
+		folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+		bytes = min(folio_size(folio) - folio_offset, num);
+
+		tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
 		flush_dcache_folio(folio);
 
 		if (!tmp) {
@@ -1180,6 +1184,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 
 		err = 0;
 		ap->folios[ap->num_folios] = folio;
+		ap->descs[ap->num_folios].offset = folio_offset;
 		ap->descs[ap->num_folios].length = tmp;
 		ap->num_folios++;
 
@@ -1187,11 +1192,11 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
 		pos += tmp;
 		num -= tmp;
 		offset += tmp;
-		if (offset == PAGE_SIZE)
+		if (offset == folio_size(folio))
 			offset = 0;
 
-		/* If we copied full page, mark it uptodate */
-		if (tmp == PAGE_SIZE)
+		/* If we copied full folio, mark it uptodate */
+		if (tmp == folio_size(folio))
 			folio_mark_uptodate(folio);
 
 		if (folio_test_uptodate(folio)) {