[1/2] MM: handle THP in swap_*page_fs()

Message ID 165119301488.15698.9457662928942765453.stgit@noble.brown (mailing list archive)
State New
Series Finalising swap-over-NFS patches

Commit Message

NeilBrown April 29, 2022, 12:43 a.m. UTC
Pages passed to swap_readpage()/swap_writepage() are not necessarily all
the same size - there may be transparent-huge-pages involved.

The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
path does not.

So we need to use thp_size() to find the size, not just assume
PAGE_SIZE, and we need to track the total length of the request, not
just assume it is "pages * PAGE_SIZE".

Reported-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: NeilBrown <neilb@suse.de>
---
 mm/page_io.c |   23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
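
To illustrate the change (a standalone sketch, not kernel code): with 4kB
base pages, a PMD-sized THP contributes a single bvec of thp_size() == 2MB,
so the byte length of a plugged request can no longer be derived from the
bvec count.

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define THP_SIZE	(512 * PAGE_SIZE)	/* PMD-sized THP on x86-64 */

	int main(void)
	{
		/* one plugged request: a 2MB THP followed by a 4kB page */
		unsigned long bv_len[] = { THP_SIZE, PAGE_SIZE };
		unsigned long len = 0;
		int pages = 2;

		for (int i = 0; i < pages; i++)
			len += bv_len[i];	/* what sio->len accumulates */

		printf("pages * PAGE_SIZE = %lu\n", pages * PAGE_SIZE);	/* 8192: wrong */
		printf("len               = %lu\n", len);		/* 2101248: right */
		return 0;
	}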

Comments

Andrew Morton April 29, 2022, 1:21 a.m. UTC | #1
On Fri, 29 Apr 2022 10:43:34 +1000 NeilBrown <neilb@suse.de> wrote:

> Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> the same size - there may be transparent-huge-pages involved.
> 
> The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> path does not.
> 
> So we need to use thp_size() to find the size, not just assume
> PAGE_SIZE, and we need to track the total length of the request, not
> just assume it is "pages * PAGE_SIZE".

Cool.  I added this in the series after
mm-submit-multipage-write-for-swp_fs_ops-swap-space.patch.  I could
later squash it into that patch if you think that's more logical.
NeilBrown April 29, 2022, 1:57 a.m. UTC | #2
On Fri, 29 Apr 2022, Andrew Morton wrote:
> On Fri, 29 Apr 2022 10:43:34 +1000 NeilBrown <neilb@suse.de> wrote:
> 
> > Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> > the same size - there may be transparent-huge-pages involved.
> > 
> > The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> > path does not.
> > 
> > So we need to use thp_size() to find the size, not just assume
> > PAGE_SIZE, and we need to track the total length of the request, not
> > just assume it is "pages * PAGE_SIZE".
> 
> Cool.  I added this in the series after
> mm-submit-multipage-write-for-swp_fs_ops-swap-space.patch.  I could
> later squash it into that patch if you think that's more logical.

I think it is best to keep it separate, though that position is good.
If we were to squash, some would need to go into the "submit multipage
reads" patch, and some into "submit multipage writes".  If you wanted to
do that I wouldn't object, but I don't think it is needed.

Thanks,
NeilBrown
Miaohe Lin April 29, 2022, 8:13 a.m. UTC | #3
On 2022/4/29 8:43, NeilBrown wrote:
> Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> the same size - there may be transparent-huge-pages involved.
> 
> The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> path does not.
> 
> So we need to use thp_size() to find the size, not just assume
> PAGE_SIZE, and we need to track the total length of the request, not
> just assume it is "pages * PAGE_SIZE".
> 
> Reported-by: Miaohe Lin <linmiaohe@huawei.com>
> Signed-off-by: NeilBrown <neilb@suse.de>
> ---
>  mm/page_io.c |   23 +++++++++++++----------
>  1 file changed, 13 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/page_io.c b/mm/page_io.c
> index c132511f521c..d636a3531cad 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -239,6 +239,7 @@ struct swap_iocb {
>  	struct kiocb		iocb;
>  	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
>  	int			pages;
> +	int			len;
>  };
>  static mempool_t *sio_pool;
>  
> @@ -261,7 +262,7 @@ static void sio_write_complete(struct kiocb *iocb, long ret)

The patch looks good to me. Thanks!

But we might need to use count_swpout_vm_event() in sio_write_complete(); THP_SWPOUT should
be accounted too. And count_vm_events(PSWPOUT, sio->pages) doesn't account the right number
of pages now. Maybe sio_read_complete() also needs a similar fix. Or am I missing something?
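
Concretely, something like this in sio_write_complete()'s success path is
what I mean (a rough sketch, using the existing count_swpout_vm_event()
helper, which counts THP_SWPOUT for a huge page and PSWPOUT by
thp_nr_pages()):

	for (p = 0; p < sio->pages; p++) {
		struct page *page = sio->bvec[p].bv_page;

		/* per-page: THP_SWPOUT for a THP, and PSWPOUT by
		 * thp_nr_pages(page) rather than by bvec count */
		count_swpout_vm_event(page);
		end_page_writeback(page);
	}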

Thanks!

Yang Shi April 29, 2022, 7:04 p.m. UTC | #4
On Thu, Apr 28, 2022 at 5:44 PM NeilBrown <neilb@suse.de> wrote:
>
> Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> the same size - there may be transparent-huge-pages involved.
>
> The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> path does not.
>
> So we need to use thp_size() to find the size, not just assume
> PAGE_SIZE, and we need to track the total length of the request, not
> just assume it is "pages * PAGE_SIZE".

Swap-over-nfs doesn't support THP swap IIUC. So SWP_FS_OPS should not
see THP at all. But I agree to remove the assumption about page size
in this path.

NeilBrown May 2, 2022, 4:23 a.m. UTC | #5
On Sat, 30 Apr 2022, Yang Shi wrote:
> On Thu, Apr 28, 2022 at 5:44 PM NeilBrown <neilb@suse.de> wrote:
> >
> > Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> > the same size - there may be transparent-huge-pages involved.
> >
> > The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> > path does not.
> >
> > So we need to use thp_size() to find the size, not just assume
> > PAGE_SIZE, and we need to track the total length of the request, not
> > just assume it is "pages * PAGE_SIZE".
> 
> Swap-over-nfs doesn't support THP swap IIUC. So SWP_FS_OPS should not
> see THP at all. But I agree to remove the assumption about page size
> in this path.

Can you help me understand this, please?  How would the swap code know
that swap-over-NFS doesn't support THP swap?  There is no reason that
NFS wouldn't be able to handle 2MB writes.  Even 1GB should work, though
NFS would have to split it into several smaller WRITE requests.

Thanks,
NeilBrown


Yang Shi May 2, 2022, 5:48 p.m. UTC | #6
On Sun, May 1, 2022 at 9:23 PM NeilBrown <neilb@suse.de> wrote:
>
> On Sat, 30 Apr 2022, Yang Shi wrote:
> > On Thu, Apr 28, 2022 at 5:44 PM NeilBrown <neilb@suse.de> wrote:
> > >
> > > Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> > > the same size - there may be transparent-huge-pages involved.
> > >
> > > The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> > > path does not.
> > >
> > > So we need to use thp_size() to find the size, not just assume
> > > PAGE_SIZE, and we need to track the total length of the request, not
> > > just assume it is "pages * PAGE_SIZE".
> >
> > Swap-over-nfs doesn't support THP swap IIUC. So SWP_FS_OPS should not
> > see THP at all. But I agree to remove the assumption about page size
> > in this path.
>
> Can you help me understand this, please?  How would the swap code know
> that swap-over-NFS doesn't support THP swap?  There is no reason that
> NFS wouldn't be able to handle 2MB writes.  Even 1GB should work, though
> NFS would have to split it into several smaller WRITE requests.

AFAICT, THP swap is only supported on non-rotational block devices, for
example SSD, PMEM, etc. IIRC, the swap device has to support clusters
in order to swap THP, and clusters are only supported by non-rotational
block devices.

Looped Ying in, who is the author of THP swap.

NeilBrown May 4, 2022, 11:41 p.m. UTC | #7
On Tue, 03 May 2022, Yang Shi wrote:
> On Sun, May 1, 2022 at 9:23 PM NeilBrown <neilb@suse.de> wrote:
> >
> > On Sat, 30 Apr 2022, Yang Shi wrote:
> > > On Thu, Apr 28, 2022 at 5:44 PM NeilBrown <neilb@suse.de> wrote:
> > > >
> > > > Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> > > > the same size - there may be transparent-huge-pages involved.
> > > >
> > > > The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> > > > path does not.
> > > >
> > > > So we need to use thp_size() to find the size, not just assume
> > > > PAGE_SIZE, and we need to track the total length of the request, not
> > > > just assume it is "pages * PAGE_SIZE".
> > >
> > > Swap-over-nfs doesn't support THP swap IIUC. So SWP_FS_OPS should not
> > > see THP at all. But I agree to remove the assumption about page size
> > > in this path.
> >
> > Can you help me understand this, please?  How would the swap code know
> > that swap-over-NFS doesn't support THP swap?  There is no reason that
> > NFS wouldn't be able to handle 2MB writes.  Even 1GB should work, though
> > NFS would have to split it into several smaller WRITE requests.
> 
> AFAICT, THP swap is only supported on non-rotational block devices, for
> example SSD, PMEM, etc. IIRC, the swap device has to support clusters
> in order to swap THP, and clusters are only supported by non-rotational
> block devices.
> 
> Looped Ying in, who is the author of THP swap.

I hunted around the code and found that THP swap only happens if a
'cluster_info' is allocated, and that only happens when

	if (p->bdev && bdev_nonrot(p->bdev)) {

succeeds in the swapon syscall.

I guess "nonrot" is being used as a synonym for "low latency"...
So even if NFS was low-latency it couldn't benefit from THP swap.

So, as you say, it is not currently possible for THP pages to be sent to
NFS for swapout.  It makes sense to prepare for it though, I think - if
only so that the code is more consistent and less confusing.
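
For reference, the surrounding swapon code looks roughly like this
(abbreviated from memory, not verbatim):

	if (p->bdev && bdev_nonrot(p->bdev)) {
		unsigned long nr_cluster;

		p->flags |= SWP_SOLIDSTATE;
		...
		/* cluster_info only exists for non-rotational bdevs,
		 * and THP swap depends on it */
		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
					GFP_KERNEL);
		...
	}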

Thanks,
NeilBrown
Huang, Ying May 6, 2022, 2:56 a.m. UTC | #8
On Thu, 2022-05-05 at 09:41 +1000, NeilBrown wrote:
> On Tue, 03 May 2022, Yang Shi wrote:
> > On Sun, May 1, 2022 at 9:23 PM NeilBrown <neilb@suse.de> wrote:
> > > 
> > > On Sat, 30 Apr 2022, Yang Shi wrote:
> > > > On Thu, Apr 28, 2022 at 5:44 PM NeilBrown <neilb@suse.de> wrote:
> > > > > 
> > > > > Pages passed to swap_readpage()/swap_writepage() are not necessarily all
> > > > > the same size - there may be transparent-huge-pages involved.
> > > > > 
> > > > > The BIO paths of swap_*page() handle this correctly, but the SWP_FS_OPS
> > > > > path does not.
> > > > > 
> > > > > So we need to use thp_size() to find the size, not just assume
> > > > > PAGE_SIZE, and we need to track the total length of the request, not
> > > > > just assume it is "pages * PAGE_SIZE".
> > > > 
> > > > Swap-over-nfs doesn't support THP swap IIUC. So SWP_FS_OPS should not
> > > > see THP at all. But I agree to remove the assumption about page size
> > > > in this path.
> > > 
> > > Can you help me understand this, please?  How would the swap code know
> > > that swap-over-NFS doesn't support THP swap?  There is no reason that
> > > NFS wouldn't be able to handle 2MB writes.  Even 1GB should work, though
> > > NFS would have to split it into several smaller WRITE requests.
> > 
> > AFAICT, THP swap is only supported on non-rotational block devices, for
> > example SSD, PMEM, etc. IIRC, the swap device has to support clusters
> > in order to swap THP, and clusters are only supported by non-rotational
> > block devices.
> > 
> > Looped Ying in, who is the author of THP swap.
> 
> I hunted around the code and found that THP swap only happens if a
> 'cluster_info' is allocated, and that only happens when
>
> 	if (p->bdev && bdev_nonrot(p->bdev)) {
>
> succeeds in the swapon syscall.
> 

And in get_swap_pages(), a whole cluster is only allocated for block
devices.

		if (size == SWAPFILE_CLUSTER) {
			if (si->flags & SWP_BLKDEV)
				n_ret = swap_alloc_cluster(si, swp_entries);
		} else
			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
						    n_goal, swp_entries);

We may remove this restriction in the future if someone can show the
benefit.
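
For reference, with CONFIG_THP_SWAP a swap cluster is sized to hold
exactly one PMD-sized THP, which is why size == SWAPFILE_CLUSTER above
identifies a THP allocation:

	/* mm/swapfile.c */
	#ifdef CONFIG_THP_SWAP
	#define SWAPFILE_CLUSTER	HPAGE_PMD_NR	/* one cluster == one THP */
	#else
	#define SWAPFILE_CLUSTER	256UL
	#endif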

Best Regards,
Huang, Ying


Patch

diff --git a/mm/page_io.c b/mm/page_io.c
index c132511f521c..d636a3531cad 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -239,6 +239,7 @@  struct swap_iocb {
 	struct kiocb		iocb;
 	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
 	int			pages;
+	int			len;
 };
 static mempool_t *sio_pool;
 
@@ -261,7 +262,7 @@  static void sio_write_complete(struct kiocb *iocb, long ret)
 	struct page *page = sio->bvec[0].bv_page;
 	int p;
 
-	if (ret != PAGE_SIZE * sio->pages) {
+	if (ret != sio->len) {
 		/*
 		 * In the case of swap-over-nfs, this can be a
 		 * temporary failure if the system has limited
@@ -301,7 +302,7 @@  static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
 		sio = *wbc->swap_plug;
 	if (sio) {
 		if (sio->iocb.ki_filp != swap_file ||
-		    sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
+		    sio->iocb.ki_pos + sio->len != pos) {
 			swap_write_unplug(sio);
 			sio = NULL;
 		}
@@ -312,10 +313,12 @@  static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
 		sio->iocb.ki_complete = sio_write_complete;
 		sio->iocb.ki_pos = pos;
 		sio->pages = 0;
+		sio->len = 0;
 	}
 	sio->bvec[sio->pages].bv_page = page;
-	sio->bvec[sio->pages].bv_len = PAGE_SIZE;
+	sio->bvec[sio->pages].bv_len = thp_size(page);
 	sio->bvec[sio->pages].bv_offset = 0;
+	sio->len += thp_size(page);
 	sio->pages += 1;
 	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
 		swap_write_unplug(sio);
@@ -371,8 +374,7 @@  void swap_write_unplug(struct swap_iocb *sio)
 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
 	int ret;
 
-	iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages,
-		      PAGE_SIZE * sio->pages);
+	iov_iter_bvec(&from, WRITE, sio->bvec, sio->pages, sio->len);
 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
 	if (ret != -EIOCBQUEUED)
 		sio_write_complete(&sio->iocb, ret);
@@ -383,7 +385,7 @@  static void sio_read_complete(struct kiocb *iocb, long ret)
 	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
 	int p;
 
-	if (ret == PAGE_SIZE * sio->pages) {
+	if (ret == sio->len) {
 		for (p = 0; p < sio->pages; p++) {
 			struct page *page = sio->bvec[p].bv_page;
 
@@ -415,7 +417,7 @@  static void swap_readpage_fs(struct page *page,
 		sio = *plug;
 	if (sio) {
 		if (sio->iocb.ki_filp != sis->swap_file ||
-		    sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
+		    sio->iocb.ki_pos + sio->len != pos) {
 			swap_read_unplug(sio);
 			sio = NULL;
 		}
@@ -426,10 +428,12 @@  static void swap_readpage_fs(struct page *page,
 		sio->iocb.ki_pos = pos;
 		sio->iocb.ki_complete = sio_read_complete;
 		sio->pages = 0;
+		sio->len = 0;
 	}
 	sio->bvec[sio->pages].bv_page = page;
-	sio->bvec[sio->pages].bv_len = PAGE_SIZE;
+	sio->bvec[sio->pages].bv_len = thp_size(page);
 	sio->bvec[sio->pages].bv_offset = 0;
+	sio->len += thp_size(page);
 	sio->pages += 1;
 	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
 		swap_read_unplug(sio);
@@ -521,8 +525,7 @@  void __swap_read_unplug(struct swap_iocb *sio)
 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
 	int ret;
 
-	iov_iter_bvec(&from, READ, sio->bvec, sio->pages,
-		      PAGE_SIZE * sio->pages);
+	iov_iter_bvec(&from, READ, sio->bvec, sio->pages, sio->len);
 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
 	if (ret != -EIOCBQUEUED)
 		sio_read_complete(&sio->iocb, ret);
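
To summarise how the plugged path batches requests after this patch, here
is a worked example (hypothetical swap offsets, 4kB base pages):

	/*
	 * Plugged swap-out of three 4kB pages at swap offsets 10, 11, 12:
	 *
	 *   swap_writepage_fs(p10): no sio -> allocate; ki_pos = 10 * 4096,
	 *                           len = 4096, pages = 1
	 *   swap_writepage_fs(p11): ki_pos + len == pos -> append;
	 *                           len = 8192, pages = 2
	 *   swap_writepage_fs(p12): append; len = 12288, pages = 3
	 *   swap_write_unplug():    iov_iter_bvec(&from, WRITE, sio->bvec,
	 *                                         3, 12288);
	 *                           mapping->a_ops->swap_rw(&sio->iocb, &from);
	 *
	 * A PMD-sized THP would instead contribute one bvec with
	 * bv_len = thp_size(page) = 2MB, advancing len by 2MB while pages
	 * grows by only 1 -- which is why the contiguity check and the
	 * iov_iter length must use len, not pages * PAGE_SIZE.
	 */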