@@ -4113,7 +4113,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
/* To provide entry to swap_read_folio() */
folio->swap = entry;
- swap_read_folio(folio, true, NULL);
+ swap_read_folio(folio, NULL);
folio->private = NULL;
}
} else {
@@ -493,10 +493,10 @@ static void swap_read_folio_bdev_async(struct folio *folio,
submit_bio(bio);
}
 
-void swap_read_folio(struct folio *folio, bool synchronous,
- struct swap_iocb **plug)
+void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
bool workingset = folio_test_workingset(folio);
unsigned long pflags;
bool in_thrashing;
@@ -521,7 +521,7 @@ void swap_read_folio(struct folio *folio, bool synchronous,
folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
swap_read_folio_fs(folio, plug);
- } else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
+ } else if (synchronous) {
swap_read_folio_bdev_sync(folio, sis);
} else {
swap_read_folio_bdev_async(folio, sis);
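The effect of the two page_io.c hunks above is that the sync-vs-async choice is now derived once from the swap device's flags instead of being passed in by the caller. This is behavior-preserving: the only caller that passed true was the swap-cache-bypass path in do_swap_page(), which is itself only reachable when SWP_SYNCHRONOUS_IO is set, while every other caller passed false and already fell back to the same flag test. The standalone sketch below models just that dispatch; the struct, flag values, and print statements are illustrative stand-ins, not kernel code.

/* decision_demo.c - illustrative stand-in, not kernel code: models the
 * dispatch swap_read_folio() now performs after deriving `synchronous`
 * from the device flags instead of taking it as an argument.
 */
#include <stdbool.h>
#include <stdio.h>

#define SWP_SYNCHRONOUS_IO	(1UL << 0)	/* bit values are made up */
#define SWP_FS_OPS		(1UL << 1)

struct demo_swap_info { unsigned long flags; };

static void demo_read_folio(const struct demo_swap_info *sis)
{
	/* Derived here, exactly once, from the device: no caller hint. */
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;

	if (sis->flags & SWP_FS_OPS)
		puts("swap_read_folio_fs()");		/* filesystem-backed swap */
	else if (synchronous)
		puts("swap_read_folio_bdev_sync()");	/* e.g. zram */
	else
		puts("swap_read_folio_bdev_async()");	/* ordinary disk */
}

int main(void)
{
	const struct demo_swap_info sync_dev  = { .flags = SWP_SYNCHRONOUS_IO };
	const struct demo_swap_info async_dev = { .flags = 0 };

	demo_read_folio(&sync_dev);	/* -> sync path */
	demo_read_folio(&async_dev);	/* -> async path */
	return 0;
}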
@@ -11,8 +11,7 @@ struct mempolicy;
/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
-void swap_read_folio(struct folio *folio, bool do_poll,
- struct swap_iocb **plug);
+void swap_read_folio(struct folio *folio, struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
@@ -83,8 +82,7 @@ static inline unsigned int folio_swap_flags(struct folio *folio)
}
#else /* CONFIG_SWAP */
struct swap_iocb;
-static inline void swap_read_folio(struct folio *folio, bool do_poll,
- struct swap_iocb **plug)
+static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
}
static inline void swap_write_unplug(struct swap_iocb *sio)
@@ -567,7 +567,7 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
mpol_cond_put(mpol);
 
if (page_allocated)
- swap_read_folio(folio, false, plug);
+ swap_read_folio(folio, plug);
return folio;
}
@@ -684,7 +684,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!folio)
continue;
if (page_allocated) {
- swap_read_folio(folio, false, &splug);
+ swap_read_folio(folio, &splug);
if (offset != entry_offset) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
@@ -701,7 +701,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
&page_allocated, false);
if (unlikely(page_allocated)) {
zswap_folio_swapin(folio);
- swap_read_folio(folio, false, NULL);
+ swap_read_folio(folio, NULL);
}
return folio;
}
@@ -834,7 +834,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
if (!folio)
continue;
if (page_allocated) {
- swap_read_folio(folio, false, &splug);
+ swap_read_folio(folio, &splug);
if (addr != vmf->address) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
@@ -853,7 +853,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
&page_allocated, false);
if (unlikely(page_allocated)) {
zswap_folio_swapin(folio);
- swap_read_folio(folio, false, NULL);
+ swap_read_folio(folio, NULL);
}
return folio;
}
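Across the swap_state.c callers above, the change is purely mechanical: readahead paths keep passing their swap_iocb plug (or NULL for a lone read), and the batching behavior is untouched, since swap_read_unplug() still submits whatever the plugged SWP_FS_OPS reads accumulated. Below is a standalone sketch of that plug pattern, with made-up demo types standing in for swap_iocb; it is not kernel code.

/* plug_demo.c - illustrative stand-in, not kernel code: the readahead
 * pattern used above, where callers pass an optional plug so multiple
 * reads can be batched and submitted once via swap_read_unplug().
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_plug { int nr_pending; };

/* Queue one read; the batch is allocated lazily on first use. */
static void demo_read_folio(long offset, struct demo_plug **plug)
{
	if (plug) {
		if (!*plug) {
			*plug = calloc(1, sizeof(**plug));
			if (!*plug)
				return;
		}
		(*plug)->nr_pending++;
		printf("queued read at offset %ld\n", offset);
	} else {
		/* NULL plug: submit immediately, nothing to batch */
		printf("submitted lone read at offset %ld\n", offset);
	}
}

static void demo_read_unplug(struct demo_plug *plug)
{
	if (!plug)
		return;			/* nothing was batched */
	printf("submitted batch of %d reads\n", plug->nr_pending);
	free(plug);
}

int main(void)
{
	struct demo_plug *splug = NULL;

	for (long off = 0; off < 4; off++)
		demo_read_folio(off, &splug);	/* readahead window */
	demo_read_unplug(splug);		/* one batched submission */

	demo_read_folio(8, NULL);		/* single fault, no plug */
	return 0;
}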