Message ID | 20230602222445.2284892-6-willy@infradead.org (mailing list archive)
---|---
State | Superseded, archived
Series | Create large folios in iomap buffered write path
On Fri, Jun 02, 2023 at 11:24:42PM +0100, Matthew Wilcox (Oracle) wrote:
> Allow callers of __filemap_get_folio() to specify a preferred folio
> order in the FGP flags.  This is only honoured in the FGP_CREATE path;
> if there is already a folio in the page cache that covers the index,
> we will return it, no matter what its order is.  No create-around is
> attempted; we will only create folios which start at the specified index.
> Unmodified callers will continue to allocate order 0 folios.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>
> [...]
>
> +		do {
> +			err = -ENOMEM;
> +			if (order == 1)
> +				order = 0;

Doesn't this interrupt the scale-down progression 2M -> 1M -> 512K ->
256K -> 128K -> 64K -> 32K -> 16K -> 4k?  What if I want 8k folios?

--D
On Sun, Jun 04, 2023 at 11:09:25AM -0700, Darrick J. Wong wrote:
> On Fri, Jun 02, 2023 at 11:24:42PM +0100, Matthew Wilcox (Oracle) wrote:
> > +		do {
> > +			err = -ENOMEM;
> > +			if (order == 1)
> > +				order = 0;
>
> Doesn't this interrupt the scale-down progression 2M -> 1M -> 512K ->
> 256K -> 128K -> 64K -> 32K -> 16K -> 4k?  What if I want 8k folios?

You can't have order-1 file/anon folios.  We have deferred_list in the
third page, so we have to have at least three pages in every large folio.
I forget exactly what it's used for; maybe there's a way to do without
it, but for now that's the rule.
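For concreteness, here is a small userspace-only sketch (not kernel code) of the order sequence the new do/while loop walks when asked for an order-9 folio (2MB with 4kB pages) and every allocation fails; order 1 is skipped for the reason given above, which is why an 8kB request falls back to 4kB.

#include <stdio.h>

int main(void)
{
	unsigned int order = 9;

	/* Mirrors the fallback structure of the patch's do/while loop. */
	do {
		if (order == 1)
			order = 0;
		printf("try order %u\n", order);	/* prints 9 8 7 6 5 4 3 2 0 */
	} while (order-- > 0);
	return 0;
}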
Still not a huge fan of the dense encoding in the flags, but technically
this looks fine to me:
Reviewed-by: Christoph Hellwig <hch@lst.de>
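For readers following the "dense encoding" remark: the patch stores the requested order in the top six bits of the fgp_t flags word. The following userspace-only sketch shows the round trip, assuming 4kB pages (PAGE_SHIFT of 12); the names mirror fgp_set_order()/FGP_GET_ORDER() from the patch, but this is not kernel code.

#include <stdio.h>

#define PAGE_SHIFT	12		/* assume 4kB pages */
#define FGP_ORDER_SHIFT	26		/* order lives in the top 6 bits */

/* Mirrors fgp_set_order() from the patch: fold a byte count into flag bits. */
static unsigned int fgp_set_order(size_t size)
{
	unsigned int shift = 8 * sizeof(size) - 1 - __builtin_clzl(size); /* ilog2 */

	if (shift <= PAGE_SHIFT)
		return 0;
	return (shift - PAGE_SHIFT) << FGP_ORDER_SHIFT;
}

/* Mirrors FGP_GET_ORDER() from the patch: recover the order from the flags. */
static unsigned int fgp_get_order(unsigned int fgp)
{
	return fgp >> FGP_ORDER_SHIFT;
}

int main(void)
{
	unsigned int flags = fgp_set_order(64 * 1024);

	/* 64kB is 16 pages of 4kB, i.e. order 4. */
	printf("order for 64kB: %u\n", fgp_get_order(flags));
	return 0;
}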
On Sun, Jun 04, 2023 at 10:48:47PM +0100, Matthew Wilcox wrote:
> On Sun, Jun 04, 2023 at 11:09:25AM -0700, Darrick J. Wong wrote:
> > On Fri, Jun 02, 2023 at 11:24:42PM +0100, Matthew Wilcox (Oracle) wrote:
> > > +		do {
> > > +			err = -ENOMEM;
> > > +			if (order == 1)
> > > +				order = 0;
> >
> > Doesn't this interrupt the scale-down progression 2M -> 1M -> 512K ->
> > 256K -> 128K -> 64K -> 32K -> 16K -> 4k?  What if I want 8k folios?
>
> You can't have order-1 file/anon folios.  We have deferred_list in the
> third page, so we have to have at least three pages in every large folio.
> I forget exactly what it's used for; maybe there's a way to do without
> it, but for now that's the rule.

Ahahaha, ok.  I hadn't realized/remembered that.

/me wonders if that ought to be captured in a header as some static
inline clamping function instead of opencoded, but afaict there are only
four places around the kernel that do/need this.

Really it's a pity we can't do

	for order in 9 8 7 6 5 4 3 2 0; do

Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D
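One possible shape for the "static inline clamping function" mused about above is sketched here. The helper name mapping_clamp_order() and its placement are hypothetical; the patch under review open-codes these checks inside __filemap_get_folio().

#include <linux/bitops.h>
#include <linux/pagemap.h>

/* Hypothetical helper: clamp a requested folio order for this mapping/index. */
static inline unsigned int mapping_clamp_order(struct address_space *mapping,
		pgoff_t index, unsigned int order)
{
	if (!mapping_large_folio_support(mapping))
		return 0;
	if (order > MAX_PAGECACHE_ORDER)
		order = MAX_PAGECACHE_ORDER;
	/* Folios must start at an index aligned to their size. */
	if (index & ((1UL << order) - 1))
		order = __ffs(index);
	/* No order-1 folios: deferred_list needs a third page. */
	if (order == 1)
		order = 0;
	return order;
}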
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7ab57a2bb576..667ce668f438 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -466,6 +466,19 @@ static inline void *detach_page_private(struct page *page)
 	return folio_detach_private(page_folio(page));
 }
 
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size.  I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER	8
+#endif
+
 #ifdef CONFIG_NUMA
 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
 #else
@@ -531,9 +544,19 @@ typedef unsigned int __bitwise fgp_t;
 #define FGP_NOWAIT		((__force fgp_t)0x00000020)
 #define FGP_FOR_MMAP		((__force fgp_t)0x00000040)
 #define FGP_STABLE		((__force fgp_t)0x00000080)
+#define FGP_GET_ORDER(fgp)	(((__force unsigned)fgp) >> 26)	/* top 6 bits */
 
 #define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
 
+static inline fgp_t fgp_set_order(size_t size)
+{
+	unsigned int shift = ilog2(size);
+
+	if (shift <= PAGE_SHIFT)
+		return 0;
+	return (__force fgp_t)((shift - PAGE_SHIFT) << 26);
+}
+
 void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		fgp_t fgp_flags, gfp_t gfp);
diff --git a/mm/filemap.c b/mm/filemap.c
index eb89a815f2f8..10ea9321c36e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1937,7 +1937,9 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 		folio_wait_stable(folio);
 no_page:
 	if (!folio && (fgp_flags & FGP_CREAT)) {
+		unsigned order = FGP_GET_ORDER(fgp_flags);
 		int err;
+
 		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
 			gfp |= __GFP_WRITE;
 		if (fgp_flags & FGP_NOFS)
@@ -1946,26 +1948,40 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 			gfp &= ~GFP_KERNEL;
 			gfp |= GFP_NOWAIT | __GFP_NOWARN;
 		}
-
-		folio = filemap_alloc_folio(gfp, 0);
-		if (!folio)
-			return ERR_PTR(-ENOMEM);
-
 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
 			fgp_flags |= FGP_LOCK;
 
-		/* Init accessed so avoid atomic mark_page_accessed later */
-		if (fgp_flags & FGP_ACCESSED)
-			__folio_set_referenced(folio);
+		if (!mapping_large_folio_support(mapping))
+			order = 0;
+		if (order > MAX_PAGECACHE_ORDER)
+			order = MAX_PAGECACHE_ORDER;
+		/* If we're not aligned, allocate a smaller folio */
+		if (index & ((1UL << order) - 1))
+			order = __ffs(index);
 
-		err = filemap_add_folio(mapping, folio, index, gfp);
-		if (unlikely(err)) {
+		do {
+			err = -ENOMEM;
+			if (order == 1)
+				order = 0;
+			folio = filemap_alloc_folio(gfp, order);
+			if (!folio)
+				continue;
+
+			/* Init accessed so avoid atomic mark_page_accessed later */
+			if (fgp_flags & FGP_ACCESSED)
+				__folio_set_referenced(folio);
+
+			err = filemap_add_folio(mapping, folio, index, gfp);
+			if (!err)
+				break;
 			folio_put(folio);
 			folio = NULL;
-			if (err == -EEXIST)
-				goto repeat;
-		}
+		} while (order-- > 0);
 
+		if (err == -EEXIST)
+			goto repeat;
+		if (err)
+			return ERR_PTR(err);
 		/*
 		 * filemap_add_folio locks the page, and for mmap
 		 * we expect an unlocked page.
diff --git a/mm/readahead.c b/mm/readahead.c
index 47afbca1d122..59a071badb90 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -462,19 +462,6 @@ static int try_context_readahead(struct address_space *mapping,
 	return 1;
 }
 
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER.  Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size.  I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER	8
-#endif
-
 static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
 		pgoff_t mark, unsigned int order, gfp_t gfp)
 {
Allow callers of __filemap_get_folio() to specify a preferred folio
order in the FGP flags.  This is only honoured in the FGP_CREATE path;
if there is already a folio in the page cache that covers the index,
we will return it, no matter what its order is.  No create-around is
attempted; we will only create folios which start at the specified index.
Unmodified callers will continue to allocate order 0 folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 23 ++++++++++++++++++++++
 mm/filemap.c            | 42 ++++++++++++++++++++++++++++-------------
 mm/readahead.c          | 13 -------------
 3 files changed, 52 insertions(+), 26 deletions(-)
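As a usage illustration, a buffered write path built on this patch might ask the page cache for a folio large enough to cover the write roughly as sketched below. The helper write_begin_get_folio() is hypothetical; the real iomap conversion appears later in this series.

#include <linux/pagemap.h>

static struct folio *write_begin_get_folio(struct address_space *mapping,
		loff_t pos, size_t len)
{
	fgp_t fgp = FGP_WRITEBEGIN | fgp_set_order(len);

	/*
	 * The order is only a hint: an existing smaller folio covering the
	 * index, or a failed large allocation, still yields a usable folio.
	 */
	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
			mapping_gfp_mask(mapping));
}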