
[07/14] iomap: Support THPs in readpage

Message ID 20201014030357.21898-8-willy@infradead.org (mailing list archive)
State New, archived
Series Transparent Huge Page support for XFS

Commit Message

Matthew Wilcox Oct. 14, 2020, 3:03 a.m. UTC
The VFS only calls readpage if readahead has encountered an error.
Assume that any error requires the page to be split, and attempt to
do so.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 39 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)
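
For context, the fallback the commit message describes is the VFS path that reaches ->readpage only after readahead has left the page locked but not Uptodate. Below is a heavily simplified, hypothetical sketch of that caller contract; the helper name and control flow are illustrative, not the actual mm/filemap.c code.

/*
 * Hypothetical sketch of the VFS fallback that ends up calling ->readpage.
 * Heavily simplified from mm/filemap.c; the point is the contract: the page
 * arrives locked and !Uptodate, and the return code decides what happens next.
 */
static int read_one_page_sketch(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	int error;

	/* Readahead already failed to bring this page uptodate. */
	error = mapping->a_ops->readpage(file, page);
	if (error == AOP_TRUNCATED_PAGE) {
		/* ->readpage unlocked the page; redo the page cache lookup. */
		put_page(page);
		return -EAGAIN;
	}
	if (error)
		return error;

	/* I/O was submitted; wait for completion by re-taking the page lock. */
	lock_page(page);
	if (!PageUptodate(page)) {
		unlock_page(page);
		return -EIO;
	}
	unlock_page(page);
	return 0;
}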

Comments

Darrick J. Wong Oct. 14, 2020, 4:39 p.m. UTC | #1
On Wed, Oct 14, 2020 at 04:03:50AM +0100, Matthew Wilcox (Oracle) wrote:
> The VFS only calls readpage if readahead has encountered an error.
> Assume that any error requires the page to be split, and attempt to
> do so.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/iomap/buffered-io.c | 39 +++++++++++++++++++++++++++++++++++++--
>  1 file changed, 37 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 4ea6c601a183..ca305fbaf811 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -343,15 +343,50 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  	return pos - orig_pos + plen;
>  }
>  
> +/*
> + * The page that was passed in has become Uptodate.  This may be due to
> + * the storage being synchronous or due to a page split finding the page
> + * is actually uptodate.  The page is still locked.
> + * Lift this into the VFS at some point.
> + */
> +#define AOP_UPDATED_PAGE       (AOP_TRUNCATED_PAGE + 1)

Er... why not lift it now?

"Because touching fs.h causes the whole kernel to be rebuilt and that's
annoying"? :D

--D

> +static int iomap_split_page(struct inode *inode, struct page *page)
> +{
> +	struct page *head = thp_head(page);
> +	bool uptodate = iomap_range_uptodate(inode, head,
> +				(page - head) * PAGE_SIZE, PAGE_SIZE);
> +
> +	iomap_page_release(head);
> +	if (split_huge_page(page) < 0) {
> +		unlock_page(page);
> +		return AOP_TRUNCATED_PAGE;
> +	}
> +	if (!uptodate)
> +		return 0;
> +	SetPageUptodate(page);
> +	return AOP_UPDATED_PAGE;
> +}
> +
>  int
>  iomap_readpage(struct page *page, const struct iomap_ops *ops)
>  {
>  	struct iomap_readpage_ctx ctx = { .cur_page = page };
> -	struct inode *inode = page->mapping->host;
> +	struct inode *inode = thp_head(page)->mapping->host;
>  	unsigned poff;
>  	loff_t ret;
>  
> -	trace_iomap_readpage(page->mapping->host, 1);
> +	trace_iomap_readpage(inode, 1);
> +
> +	if (PageTransCompound(page)) {
> +		int status = iomap_split_page(inode, page);
> +		if (status == AOP_UPDATED_PAGE) {
> +			unlock_page(page);

/me wonders why not do the unlock in iomap_split_page?

--D

> +			return 0;
> +		}
> +		if (status)
> +			return status;
> +	}
>  
>  	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
>  		ret = iomap_apply(inode, page_offset(page) + poff,
> -- 
> 2.28.0
>
Matthew Wilcox Oct. 14, 2020, 5:35 p.m. UTC | #2
On Wed, Oct 14, 2020 at 09:39:36AM -0700, Darrick J. Wong wrote:
> On Wed, Oct 14, 2020 at 04:03:50AM +0100, Matthew Wilcox (Oracle) wrote:
> > +/*
> > + * The page that was passed in has become Uptodate.  This may be due to
> > + * the storage being synchronous or due to a page split finding the page
> > + * is actually uptodate.  The page is still locked.
> > + * Lift this into the VFS at some point.
> > + */
> > +#define AOP_UPDATED_PAGE       (AOP_TRUNCATED_PAGE + 1)
> 
> Er... why not lift it now?
> 
> "Because touching fs.h causes the whole kernel to be rebuilt and that's
> annoying"? :D

Hah!  Because I already have a patch series out that does lift it to
the VFS:

https://lore.kernel.org/linux-fsdevel/20201009143104.22673-1-willy@infradead.org/

which I'm kind of hoping gets merged first.  Then I can adjust this patch.

> > +static int iomap_split_page(struct inode *inode, struct page *page)
> > +{
> > +	struct page *head = thp_head(page);
> > +	bool uptodate = iomap_range_uptodate(inode, head,
> > +				(page - head) * PAGE_SIZE, PAGE_SIZE);
> > +
> > +	iomap_page_release(head);
> > +	if (split_huge_page(page) < 0) {
> > +		unlock_page(page);
> > +		return AOP_TRUNCATED_PAGE;
> > +	}
> > +	if (!uptodate)
> > +		return 0;
> > +	SetPageUptodate(page);
> > +	return AOP_UPDATED_PAGE;
> > +}
> > +
> >  int
> >  iomap_readpage(struct page *page, const struct iomap_ops *ops)
> >  {
> >  	struct iomap_readpage_ctx ctx = { .cur_page = page };
> > -	struct inode *inode = page->mapping->host;
> > +	struct inode *inode = thp_head(page)->mapping->host;
> >  	unsigned poff;
> >  	loff_t ret;
> >  
> > -	trace_iomap_readpage(page->mapping->host, 1);
> > +	trace_iomap_readpage(inode, 1);
> > +
> > +	if (PageTransCompound(page)) {
> > +		int status = iomap_split_page(inode, page);
> > +		if (status == AOP_UPDATED_PAGE) {
> > +			unlock_page(page);
> 
> /me wonders why not do the unlock in iomap_split_page?

This is working around AOP_UPDATED_PAGE not existing in the VFS.  So
adjusting this patch becomes:

		int status = iomap_split_page(inode, page);
		if (status)
			return status;
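
The series linked above is what teaches the generic readpage caller about AOP_UPDATED_PAGE, so the unlock moves out of iomap entirely. A sketch of roughly what that caller-side handling would look like, offered as an assumption about the shape of that series rather than its literal code:

/*
 * Hypothetical caller-side helper; an assumption about the shape of the
 * linked VFS series, not its literal code.
 */
static int readpage_and_check(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	int error = mapping->a_ops->readpage(file, page);

	if (error == AOP_UPDATED_PAGE) {
		/* Page is uptodate and still locked: unlock here, success. */
		unlock_page(page);
		return 0;
	}
	if (error == AOP_TRUNCATED_PAGE) {
		/* ->readpage already unlocked it; caller redoes the lookup. */
		put_page(page);
		return AOP_TRUNCATED_PAGE;
	}
	return error;
}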

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 4ea6c601a183..ca305fbaf811 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -343,15 +343,50 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	return pos - orig_pos + plen;
 }
 
+/*
+ * The page that was passed in has become Uptodate.  This may be due to
+ * the storage being synchronous or due to a page split finding the page
+ * is actually uptodate.  The page is still locked.
+ * Lift this into the VFS at some point.
+ */
+#define AOP_UPDATED_PAGE       (AOP_TRUNCATED_PAGE + 1)
+
+static int iomap_split_page(struct inode *inode, struct page *page)
+{
+	struct page *head = thp_head(page);
+	bool uptodate = iomap_range_uptodate(inode, head,
+				(page - head) * PAGE_SIZE, PAGE_SIZE);
+
+	iomap_page_release(head);
+	if (split_huge_page(page) < 0) {
+		unlock_page(page);
+		return AOP_TRUNCATED_PAGE;
+	}
+	if (!uptodate)
+		return 0;
+	SetPageUptodate(page);
+	return AOP_UPDATED_PAGE;
+}
+
 int
 iomap_readpage(struct page *page, const struct iomap_ops *ops)
 {
 	struct iomap_readpage_ctx ctx = { .cur_page = page };
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = thp_head(page)->mapping->host;
 	unsigned poff;
 	loff_t ret;
 
-	trace_iomap_readpage(page->mapping->host, 1);
+	trace_iomap_readpage(inode, 1);
+
+	if (PageTransCompound(page)) {
+		int status = iomap_split_page(inode, page);
+		if (status == AOP_UPDATED_PAGE) {
+			unlock_page(page);
+			return 0;
+		}
+		if (status)
+			return status;
+	}
 
 	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
 		ret = iomap_apply(inode, page_offset(page) + poff,
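
A note on iomap_range_uptodate(), which is added earlier in this series and is not visible in this patch: conceptually it consults the per-block uptodate state attached to the head page and reports whether the given byte range is already fully uptodate. A conceptual sketch follows, with the field names assumed from the 5.9-era struct iomap_page rather than taken from the series' actual helper:

/*
 * Conceptual sketch only: assumes the 5.9-era struct iomap_page with a
 * per-block 'uptodate' bitmap; not the series' actual helper.
 */
static bool range_uptodate_sketch(struct inode *inode, struct page *head,
				  unsigned int off, unsigned int len)
{
	struct iomap_page *iop = to_iomap_page(head);
	unsigned int first = off >> inode->i_blkbits;
	unsigned int last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;

	/* No sub-page state means the page-level flag is authoritative. */
	if (!iop)
		return PageUptodate(head);

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}

In iomap_split_page() above, this check decides whether the subpage can be marked Uptodate immediately after the split, avoiding a needless re-read from storage.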