[12/13] MM: use AIO/DIO for reads from SWP_FS_OPS swap-space

Message ID 163703064458.25805.6777856691611196478.stgit@noble.brown (mailing list archive)
State New
Series Repair SWAP-over-NFS

Commit Message

NeilBrown Nov. 16, 2021, 2:44 a.m. UTC
When pages are read from SWP_FS_OPS swap-space, the reads are submitted as
separate reads for each page.  This is generally less efficient than
larger reads.

We can use the block-plugging infrastructure to delay submitting the
read request until multiple contiguous pages have been collected.  This
requires using ->direct_IO to submit the read (as ->readpages isn't
suitable for swap).

If the caller schedules before unplugging, we hand the read-in task off
to system_wq to avoid any possible locking issues.

Signed-off-by: NeilBrown <neilb@suse.de>
---
 mm/page_io.c |  107 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 103 insertions(+), 4 deletions(-)
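
In outline, the patch attaches a struct swap_iocb to the current block plug
via blk_check_plugged(), accumulates contiguous pages into it, and issues a
single ->direct_IO read when the plug is flushed.  A minimal sketch of that
pattern, using names from the patch below (allocation-failure and contiguity
handling omitted):

	struct blk_plug plug;
	struct blk_plug_cb *cb;
	struct swap_iocb *sio;

	blk_start_plug(&plug);
	/* Find a swap_iocb already attached to this plug, or allocate one */
	cb = blk_check_plugged(sio_read_unplug, swap_file, sizeof(*sio));
	sio = container_of(cb, struct swap_iocb, cb);
	/* ... append the page to sio->bvec[] and bump sio->pages ... */
	blk_finish_plug(&plug);	/* runs sio_read_unplug(), which calls ->direct_IO */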

Comments

Christoph Hellwig Nov. 16, 2021, 8:31 a.m. UTC | #1
On Tue, Nov 16, 2021 at 01:44:04PM +1100, NeilBrown wrote:
> When pages are read from SWP_FS_OPS swap-space, the reads are submitted as
> separate reads for each page.  This is generally less efficient than
> larger reads.
> 
> We can use the block-plugging infrastructure to delay submitting the
> read request until multiple contiguous pages have been collected.  This
> requires using ->direct_IO to submit the read (as ->readpages isn't
> suitable for swap).

Abusing the block code here seems a little ugly.  Also this won't
compile if CONFIG_BLOCK is not set, will it?

What is the problem with just batching up manually?

> +	/* nofs needed as ->direct_IO may take the same mutex it takes for write */

Overly long line.
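
For comparison, the manual batching suggested above would presumably mean an
explicit batch structure and flush call in common code.  A hypothetical sketch
(names invented here, not from any posted patch):

	/* Hypothetical: accumulate contiguous pages, submit one read on flush */
	struct swap_read_batch {
		struct kiocb	iocb;
		struct bio_vec	bvec[SWAP_CLUSTER_MAX];
		int		pages;
	};

	static void swap_read_batch_flush(struct swap_read_batch *batch)
	{
		struct address_space *mapping = batch->iocb.ki_filp->f_mapping;
		struct iov_iter from;

		iov_iter_bvec(&from, READ, batch->bvec, batch->pages,
			      batch->pages * PAGE_SIZE);
		mapping->a_ops->direct_IO(&batch->iocb, &from);
		batch->pages = 0;
	}

Every swap_readpage() caller would then have to flush explicitly where
blk_finish_plug() does so implicitly today -- the "bigger change to common
code" referred to in the reply below.
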
NeilBrown Nov. 16, 2021, 9:46 p.m. UTC | #2
On Tue, 16 Nov 2021, Christoph Hellwig wrote:
> On Tue, Nov 16, 2021 at 01:44:04PM +1100, NeilBrown wrote:
> > When pages are read from SWP_FS_OPS swap-space, the reads are submitted as
> > separate reads for each page.  This is generally less efficient than
> > larger reads.
> > 
> > We can use the block-plugging infrastructure to delay submitting the
> > read request until multiple contiguous pages have been collected.  This
> > requires using ->direct_IO to submit the read (as ->readpages isn't
> > suitable for swap).
> 
> Abusing the block code here seems a little ugly.  Also this won't
> compile if CONFIG_BLOCK is not set, will it?

There is nothing really block-layer-specific about the plugging
interfaces.  I think it would be quite reasonable to move them to lib/.
But you are correct that currently, without CONFIG_BLOCK, the code will
compile but not work.

> 
> What is the problem with just batching up manually?

That would require a bigger change to common code, which would only
benefit one user.  The plugging mechanism works well for batching
requests to a block device.  Why not use it for non-block-devices too?

Thanks,
NeilBrown


> 
> > +	/* nofs needed as ->direct_IO may take the same mutex it takes for write */
> 
> Overly long line.
> 
>

Patch

diff --git a/mm/page_io.c b/mm/page_io.c
index 9725c7e1eeea..30d613881995 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -282,6 +282,14 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
 #define bio_associate_blkg_from_page(bio, page)		do { } while (0)
 #endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
 
+struct swap_iocb {
+	struct blk_plug_cb	cb;	/* Must be first */
+	struct kiocb		iocb;
+	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
+	struct work_struct	work;
+	int			pages;
+};
+
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
 		bio_end_io_t end_write_func)
 {
@@ -353,6 +361,59 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 	return 0;
 }
 
+static void sio_read_complete(struct kiocb *iocb, long ret)
+{
+	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
+	int p;
+
+	for (p = 0; p < sio->pages; p++) {
+		struct page *page = sio->bvec[p].bv_page;
+
+		if (ret != PAGE_SIZE * sio->pages) {
+			SetPageError(page);
+			ClearPageUptodate(page);
+			pr_alert_ratelimited("Read-error on swap-device\n");
+		} else {
+			SetPageUptodate(page);
+			count_vm_event(PSWPIN);
+		}
+		unlock_page(page);
+	}
+	kfree(sio);
+}
+
+static void sio_read_unplug(struct blk_plug_cb *cb, bool from_schedule);
+
+static void sio_read_unplug_worker(struct work_struct *work)
+{
+	struct swap_iocb *sio = container_of(work, struct swap_iocb, work);
+	sio_read_unplug(&sio->cb, false);
+}
+
+static void sio_read_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct swap_iocb *sio = container_of(cb, struct swap_iocb, cb);
+	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+	struct iov_iter from;
+	unsigned int nofs_flag;
+	int ret;
+
+	if (from_schedule) {
+		INIT_WORK(&sio->work, sio_read_unplug_worker);
+		schedule_work(&sio->work);
+		return;
+	}
+
+	iov_iter_bvec(&from, READ, sio->bvec,
+		      sio->pages, PAGE_SIZE * sio->pages);
+	/* nofs needed as ->direct_IO may take the same mutex it takes for write */
+	nofs_flag = memalloc_nofs_save();
+	ret = mapping->a_ops->direct_IO(&sio->iocb, &from);
+	memalloc_nofs_restore(nofs_flag);
+	if (ret != -EIOCBQUEUED)
+		sio_read_complete(&sio->iocb, ret);
+}
+
 int swap_readpage(struct page *page, bool synchronous)
 {
 	struct bio *bio;
@@ -380,10 +441,48 @@ int swap_readpage(struct page *page, bool synchronous)
 	if (data_race(sis->flags & SWP_FS_OPS)) {
 		struct file *swap_file = sis->swap_file;
 		struct address_space *mapping = swap_file->f_mapping;
-
-		ret = mapping->a_ops->readpage(swap_file, page);
-		if (!ret)
-			count_vm_event(PSWPIN);
+		struct blk_plug_cb *cb;
+		struct swap_iocb *sio;
+		loff_t pos = page_file_offset(page);
+		struct blk_plug plug;
+		int p;
+
+		/* We are sometimes called without a plug active.
+		 * By calling blk_start_plug() here, we ensure blk_check_plugged
+		 * only fails if memory allocation fails.
+		 */
+		blk_start_plug(&plug);
+		cb = blk_check_plugged(sio_read_unplug, swap_file, sizeof(*sio));
+		sio = container_of(cb, struct swap_iocb, cb);
+		if (cb && sio->pages &&
+		    sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
+			/* Not contiguous - hide this sio from lookup */
+			cb->data = NULL;
+			cb = blk_check_plugged(sio_read_unplug, swap_file,
+					       sizeof(*sio));
+			sio = container_of(cb, struct swap_iocb, cb);
+		}
+		if (!cb) {
+			blk_finish_plug(&plug);
+			ret = mapping->a_ops->readpage(swap_file, page);
+			if (!ret)
+				count_vm_event(PSWPIN);
+			goto out;
+		}
+		if (sio->pages == 0) {
+			init_sync_kiocb(&sio->iocb, swap_file);
+			sio->iocb.ki_pos = pos;
+			sio->iocb.ki_complete = sio_read_complete;
+		}
+		p = sio->pages;
+		sio->bvec[p].bv_page = page;
+		sio->bvec[p].bv_len = PAGE_SIZE;
+		sio->bvec[p].bv_offset = 0;
+		p += 1;
+		sio->pages = p;
+		if (p == ARRAY_SIZE(sio->bvec))
+			cb->data = NULL;
+		blk_finish_plug(&plug);
 		goto out;
 	}