--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3882,9 +3882,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio_add_lru(folio);
- /* To provide entry to swap_readpage() */
+ /* To provide entry to swap_read_folio() */
folio->swap = entry;
- swap_readpage(page, true, NULL);
+ swap_read_folio(folio, true, NULL);
folio->private = NULL;
}
} else {
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -420,7 +420,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}
-static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
+static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
struct swap_info_struct *sis = swp_swap_info(folio->swap);
struct swap_iocb *sio = NULL;
@@ -454,7 +454,7 @@ static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
*plug = sio;
}
-static void swap_readpage_bdev_sync(struct folio *folio,
+static void swap_read_folio_bdev_sync(struct folio *folio,
struct swap_info_struct *sis)
{
struct bio_vec bv;
@@ -474,7 +474,7 @@ static void swap_readpage_bdev_sync(struct folio *folio,
put_task_struct(current);
}
-static void swap_readpage_bdev_async(struct folio *folio,
+static void swap_read_folio_bdev_async(struct folio *folio,
struct swap_info_struct *sis)
{
struct bio *bio;
@@ -487,10 +487,10 @@ static void swap_readpage_bdev_async(struct folio *folio,
submit_bio(bio);
}
-void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
+void swap_read_folio(struct folio *folio, bool synchronous,
+ struct swap_iocb **plug)
{
- struct folio *folio = page_folio(page);
- struct swap_info_struct *sis = page_swap_info(page);
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
bool workingset = folio_test_workingset(folio);
unsigned long pflags;
bool in_thrashing;
@@ -514,11 +514,11 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
folio_mark_uptodate(folio);
folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
- swap_readpage_fs(folio, plug);
+ swap_read_folio_fs(folio, plug);
} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
- swap_readpage_bdev_sync(folio, sis);
+ swap_read_folio_bdev_sync(folio, sis);
} else {
- swap_readpage_bdev_async(folio, sis);
+ swap_read_folio_bdev_async(folio, sis);
}
if (workingset) {
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -10,7 +10,8 @@ struct mempolicy;
/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
-void swap_readpage(struct page *page, bool do_poll, struct swap_iocb **plug);
+void swap_read_folio(struct folio *folio, bool do_poll,
+ struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
@@ -63,7 +64,7 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
}
#else /* CONFIG_SWAP */
struct swap_iocb;
-static inline void swap_readpage(struct page *page, bool do_poll,
+static inline void swap_read_folio(struct folio *folio, bool do_poll,
struct swap_iocb **plug)
{
}
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -539,7 +539,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* the swap entry is no longer in use.
*
* get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_readpage() holds the
+ * __read_swap_cache_async() calls them and swap_read_folio() holds the
* swap cache folio lock.
*/
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
@@ -557,7 +557,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
mpol_cond_put(mpol);
if (page_allocated)
- swap_readpage(&folio->page, false, plug);
+ swap_read_folio(folio, false, plug);
return folio_file_page(folio, swp_offset(entry));
}
@@ -674,7 +674,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!folio)
continue;
if (page_allocated) {
- swap_readpage(&folio->page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (offset != entry_offset) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
@@ -690,7 +690,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(&folio->page, false, NULL);
+ swap_read_folio(folio, false, NULL);
zswap_folio_swapin(folio);
return folio_file_page(folio, swp_offset(entry));
}
@@ -848,7 +848,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
if (!folio)
continue;
if (page_allocated) {
- swap_readpage(&folio->page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (i != ra_info.offset) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
@@ -866,7 +866,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(&folio->page, false, NULL);
+ swap_read_folio(folio, false, NULL);
zswap_folio_swapin(folio);
return folio_file_page(folio, swp_offset(entry));
}
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2225,7 +2225,7 @@ EXPORT_SYMBOL_GPL(add_swap_extent);
/*
* A `swap extent' is a simple thing which maps a contiguous range of pages
* onto a contiguous range of disk blocks. A rbtree of swap extents is
- * built at swapon time and is then used at swap_writepage/swap_readpage
+ * built at swapon time and is then used at swap_writepage/swap_read_folio
* time for locating where on disk a page belongs.
*
* If the swapfile is an S_ISBLK block device, a single extent is installed.
All callers have a folio, so pass it in, saving two calls to
compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c     |  4 ++--
 mm/page_io.c    | 18 +++++++++---------
 mm/swap.h       |  5 +++--
 mm/swap_state.c | 12 ++++++------
 mm/swapfile.c   |  2 +-
 5 files changed, 21 insertions(+), 20 deletions(-)
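
The saving comes from the lines removed at the top of swap_read_folio(): the
old swap_readpage() was handed a struct page, so it had to resolve the owning
folio itself (page_folio()) and again derive per-device state from the page
(page_swap_info()), each of which needs the head page, even though every
caller already held the folio. The userspace sketch below models that
head-page lookup; it is only an illustration under simplified assumptions,
and the struct layout and helpers are stand-ins, not the real kernel
definitions.

#include <stdio.h>

struct folio;

/* Stand-in: each page remembers the folio (head page) it belongs to. */
struct page {
	struct folio *head;
};

/* Stand-in for an order-2 folio covering four pages. */
struct folio {
	struct page pages[4];
};

/* Models page_folio()/compound_head(): resolve the folio that owns a page. */
static struct folio *page_folio(struct page *page)
{
	printf("head-page lookup for page %p\n", (void *)page);
	return page->head;
}

/* Old-style interface: handed a page, so it resolves the folio itself. */
static void swap_readpage(struct page *page)
{
	struct folio *folio = page_folio(page);		/* lookup #1 */
	struct folio *again = page_folio(page);		/* lookup #2, e.g. while deriving per-device state */

	printf("old: reading folio %p (resolved twice, %p)\n", (void *)folio, (void *)again);
}

/* New-style interface: the caller passes the folio it already holds. */
static void swap_read_folio(struct folio *folio)
{
	printf("new: reading folio %p, no head-page lookups\n", (void *)folio);
}

int main(void)
{
	struct folio f;
	int i;

	for (i = 0; i < 4; i++)
		f.pages[i].head = &f;

	swap_readpage(&f.pages[0]);	/* before the patch: pass a page */
	swap_read_folio(&f);		/* after the patch: pass the folio directly */
	return 0;
}

Running the sketch prints one "head-page lookup" line per resolution on the
old path and none on the new one, which is the point of widening the
interface to take a folio.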