Message ID | 20240916165743.201087-3-shivankg@amd.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add NUMA mempolicy support for KVM guest_memfd | expand |
On Mon, Sep 16, 2024 at 04:57:42PM +0000, Shivank Garg wrote: > @@ -652,6 +660,8 @@ static inline fgf_t fgf_set_order(size_t size) > void *filemap_get_entry(struct address_space *mapping, pgoff_t index); > struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, > fgf_t fgp_flags, gfp_t gfp); > +struct folio *__filemap_get_folio_mpol(struct address_space *mapping, > + pgoff_t index, fgf_t fgp_flags, gfp_t gfp, struct mempolicy *mpol); > struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, > fgf_t fgp_flags, gfp_t gfp); > > @@ -710,6 +720,26 @@ static inline struct folio *filemap_grab_folio(struct address_space *mapping, > mapping_gfp_mask(mapping)); > } > > +/** > + * filemap_grab_folio_mpol - grab a folio from the page cache > + * @mapping: The address space to search > + * @index: The page index > + * @mpol: The mempolicy to apply > + * > + * Same as filemap_grab_folio(), except that it allocates the folio using > + * given memory policy. > + * > + * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found > + * and failed to create a folio. > + */ > +static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping, > + pgoff_t index, struct mempolicy *mpol) > +{ > + return __filemap_get_folio_mpol(mapping, index, > + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, > + mapping_gfp_mask(mapping), mpol); > +} This should be conditional on CONFIG_NUMA, just like filemap_alloc_folio_mpol_noprof() above. > @@ -1947,7 +1959,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, > err = -ENOMEM; > if (order > 0) > alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; > - folio = filemap_alloc_folio(alloc_gfp, order); > + folio = filemap_alloc_folio_mpol_noprof(alloc_gfp, order, mpol); Why use the _noprof variant here? 
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c > index 9e9450433fcc..88da732cf2be 100644 > --- a/mm/mempolicy.c > +++ b/mm/mempolicy.c > @@ -2281,6 +2281,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, > return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP, > order, pol, ilx, nid)); > } > +EXPORT_SYMBOL(folio_alloc_mpol_noprof); Why does this need to be exported? What module will use it?
Hello Matthew, Thank you for the review comments. On 9/17/2024 3:12 AM, Matthew Wilcox wrote: > On Mon, Sep 16, 2024 at 04:57:42PM +0000, Shivank Garg wrote: >> +static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping, >> + pgoff_t index, struct mempolicy *mpol) >> +{ >> + return __filemap_get_folio_mpol(mapping, index, >> + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, >> + mapping_gfp_mask(mapping), mpol); >> +} > > This should be conditional on CONFIG_NUMA, just like > filemap_alloc_folio_mpol_noprof() above. +#ifdef CONFIG_NUMA static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping, pgoff_t index, struct mempolicy *mpol) { @@ -739,6 +742,13 @@ static inline struct folio *filemap_grab_folio_mpol(struct address_space *mappin FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mapping_gfp_mask(mapping), mpol); } +#else +static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping, + pgoff_t index, struct mempolicy *mpol) +{ + return filemap_grab_folio(mapping, index); +} +#endif /* CONFIG_NUMA */ > >> @@ -1947,7 +1959,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, >> err = -ENOMEM; >> if (order > 0) >> alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; >> - folio = filemap_alloc_folio(alloc_gfp, order); >> + folio = filemap_alloc_folio_mpol_noprof(alloc_gfp, order, mpol); > > Why use the _noprof variant here? I've defined the filemap_alloc_folio_mpol variant for using here: +#define filemap_alloc_folio_mpol(...) 
\ + alloc_hooks(filemap_alloc_folio_mpol_noprof(__VA_ARGS__)) +++ b/mm/filemap.c @@ -1959,7 +1959,7 @@ struct folio *__filemap_get_folio_mpol(struct address_space *mapping, pgoff_t in err = -ENOMEM; if (order > 0) alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; - folio = filemap_alloc_folio_mpol_noprof(alloc_gfp, order, mpol); + folio = filemap_alloc_folio_mpol(alloc_gfp, order, mpol); if (!folio) > >> diff --git a/mm/mempolicy.c b/mm/mempolicy.c >> index 9e9450433fcc..88da732cf2be 100644 >> --- a/mm/mempolicy.c >> +++ b/mm/mempolicy.c >> @@ -2281,6 +2281,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, >> return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP, >> order, pol, ilx, nid)); >> } >> +EXPORT_SYMBOL(folio_alloc_mpol_noprof); > > Why does this need to be exported? What module will use it? I've removed this EXPORT. Thank you for the suggestion. I overlooked those details and will post the revised changes in the next version of this patchset. Best Regards, Shivank
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d9c7edb6422b..da7e41a45588 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -564,11 +564,19 @@ static inline void *detach_page_private(struct page *page) #ifdef CONFIG_NUMA struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); +struct folio *filemap_alloc_folio_mpol_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *mpol); #else static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { return folio_alloc_noprof(gfp, order); } +static inline struct folio *filemap_alloc_folio_mpol_noprof(gfp_t gfp, + unsigned int order, + struct mempolicy *mpol) +{ + return filemap_alloc_folio_noprof(gfp, order); +} #endif #define filemap_alloc_folio(...) \ @@ -652,6 +660,8 @@ static inline fgf_t fgf_set_order(size_t size) void *filemap_get_entry(struct address_space *mapping, pgoff_t index); struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); +struct folio *__filemap_get_folio_mpol(struct address_space *mapping, + pgoff_t index, fgf_t fgp_flags, gfp_t gfp, struct mempolicy *mpol); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); @@ -710,6 +720,26 @@ static inline struct folio *filemap_grab_folio(struct address_space *mapping, mapping_gfp_mask(mapping)); } +/** + * filemap_grab_folio_mpol - grab a folio from the page cache + * @mapping: The address space to search + * @index: The page index + * @mpol: The mempolicy to apply + * + * Same as filemap_grab_folio(), except that it allocates the folio using + * given memory policy. + * + * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found + * and failed to create a folio. 
+ */ +static inline struct folio *filemap_grab_folio_mpol(struct address_space *mapping, + pgoff_t index, struct mempolicy *mpol) +{ + return __filemap_get_folio_mpol(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, + mapping_gfp_mask(mapping), mpol); +} + /** * find_get_page - find and get a page reference * @mapping: the address_space to search diff --git a/mm/filemap.c b/mm/filemap.c index d62150418b91..a94022e31974 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -990,8 +990,13 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio, EXPORT_SYMBOL_GPL(filemap_add_folio); #ifdef CONFIG_NUMA -struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) +struct folio *filemap_alloc_folio_mpol_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *mpol) { + if (mpol) + return folio_alloc_mpol_noprof(gfp, order, mpol, + NO_INTERLEAVE_INDEX, numa_node_id()); + int n; struct folio *folio; @@ -1007,6 +1012,12 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) } return folio_alloc_noprof(gfp, order); } +EXPORT_SYMBOL(filemap_alloc_folio_mpol_noprof); + +struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) +{ + return filemap_alloc_folio_mpol_noprof(gfp, order, NULL); +} EXPORT_SYMBOL(filemap_alloc_folio_noprof); #endif @@ -1861,11 +1872,12 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index) } /** - * __filemap_get_folio - Find and get a reference to a folio. + * __filemap_get_folio_mpol - Find and get a reference to a folio. * @mapping: The address_space to search. * @index: The page index. * @fgp_flags: %FGP flags modify how the folio is returned. * @gfp: Memory allocation flags to use if %FGP_CREAT is specified. + * @mpol: The mempolicy to apply. * * Looks up the page cache entry at @mapping & @index. * @@ -1876,8 +1888,8 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index) * * Return: The found folio or an ERR_PTR() otherwise. 
*/ -struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, - fgf_t fgp_flags, gfp_t gfp) +struct folio *__filemap_get_folio_mpol(struct address_space *mapping, pgoff_t index, + fgf_t fgp_flags, gfp_t gfp, struct mempolicy *mpol) { struct folio *folio; @@ -1947,7 +1959,7 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, err = -ENOMEM; if (order > 0) alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; - folio = filemap_alloc_folio(alloc_gfp, order); + folio = filemap_alloc_folio_mpol_noprof(alloc_gfp, order, mpol); if (!folio) continue; @@ -1978,6 +1990,14 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, return ERR_PTR(-ENOENT); return folio; } +EXPORT_SYMBOL(__filemap_get_folio_mpol); + +struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, + fgf_t fgp_flags, gfp_t gfp) +{ + return __filemap_get_folio_mpol(mapping, index, + fgp_flags, gfp, NULL); +} EXPORT_SYMBOL(__filemap_get_folio); static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9e9450433fcc..88da732cf2be 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2281,6 +2281,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP, order, pol, ilx, nid)); } +EXPORT_SYMBOL(folio_alloc_mpol_noprof); /** * vma_alloc_folio - Allocate a folio for a VMA.