[v2] zswap: do not crash the kernel on decompression failure

Message ID 20250227001445.1099203-1-nphamcs@gmail.com (mailing list archive)
State New
Series [v2] zswap: do not crash the kernel on decompression failure

Commit Message

Nhat Pham Feb. 27, 2025, 12:14 a.m. UTC
Currently, we crash the kernel when a decompression failure occurs in
zswap (either because of memory corruption, or a bug in the compression
algorithm). This is overkill. We should only SIGBUS the unfortunate
process asking for the zswap entry on zswap load, and skip the corrupted
entry in zswap writeback. The former is accomplished by returning true
from zswap_load(), indicating that zswap owns the swapped out content,
but without flagging the folio as up-to-date. The process trying to swap
in the page will check for the uptodate folio flag and SIGBUS (see
do_swap_page() in mm/memory.c for more details).
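
For reference, the fault-path check looks roughly like this (a
simplified sketch of the swapin path in do_swap_page(); error
unwinding omitted):

	/* swap_read_folio() returned, but the folio was never filled */
	if (unlikely(!folio_test_uptodate(folio)))
		return VM_FAULT_SIGBUS;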

See [1] for a recent upstream discussion about this.

[1]: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/

Suggested-by: Matthew Wilcox <willy@infradead.org>
Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 mm/zswap.c | 94 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 67 insertions(+), 27 deletions(-)


base-commit: 598d34afeca6bb10554846cf157a3ded8729516c

Comments

Yosry Ahmed Feb. 27, 2025, 1:19 a.m. UTC | #1
On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> Currently, we crash the kernel when a decompression failure occurs in
> zswap (either because of memory corruption, or a bug in the compression
> algorithm). This is overkill. We should only SIGBUS the unfortunate
> process asking for the zswap entry on zswap load, and skip the corrupted
> entry in zswap writeback. The former is accomplished by returning true
> from zswap_load(), indicating that zswap owns the swapped out content,
> but without flagging the folio as up-to-date. The process trying to swap
> in the page will check for the uptodate folio flag and SIGBUS (see
> do_swap_page() in mm/memory.c for more details).

We should call out the extra xarray walks and their perf impact (if
any).

> 
> See [1] for a recent upstream discussion about this.
> 
> [1]: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/
> 
> Suggested-by: Matthew Wilcox <willy@infradead.org>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> ---
>  mm/zswap.c | 94 ++++++++++++++++++++++++++++++++++++++----------------
>  1 file changed, 67 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 6dbf31bd2218..e4a2157bbc64 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail;
>  static u64 zswap_reject_compress_fail;
>  /* Compressed page was too big for the allocator to (optimally) store */
>  static u64 zswap_reject_compress_poor;
> +/* Load or writeback failed due to decompression failure */
> +static u64 zswap_decompress_fail;
>  /* Store failed because underlying allocator could not get memory */
>  static u64 zswap_reject_alloc_fail;
>  /* Store failed because the entry metadata could not be allocated (rare) */
> @@ -996,11 +998,13 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
>  	return comp_ret == 0 && alloc_ret == 0;
>  }
>  
> -static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> +static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>  {
>  	struct zpool *zpool = entry->pool->zpool;
>  	struct scatterlist input, output;
>  	struct crypto_acomp_ctx *acomp_ctx;
> +	int decomp_ret;
> +	bool ret = true;
>  	u8 *src;
>  
>  	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
> @@ -1025,12 +1029,25 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
>  	sg_init_table(&output, 1);
>  	sg_set_folio(&output, folio, PAGE_SIZE, 0);
>  	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
> -	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
> -	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
> +	decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
> +	if (decomp_ret || acomp_ctx->req->dlen != PAGE_SIZE) {
> +		ret = false;
> +		zswap_decompress_fail++;
> +		pr_alert_ratelimited(
> +			"decompression failed with returned value %d on zswap entry with swap entry value %08lx, swap type %d, and swap offset %lu. compression algorithm is %s. compressed size is %u bytes, and decompressed size is %u bytes.\n",

This is a very long line. I think we should break it into multiple
lines. I know multiline strings are frowned upon by checkpatch, but these
exist (see the warning in mem_cgroup_oom_control_write() for example),
and they are definitely better than a very long line imo.
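
Something like this, for instance (purely illustrative wrapping; the
message text itself is unchanged):

	pr_alert_ratelimited(
		"decompression failed with returned value %d on zswap entry "
		"with swap entry value %08lx, swap type %d, and swap offset "
		"%lu. compression algorithm is %s. compressed size is %u "
		"bytes, and decompressed size is %u bytes.\n",
		decomp_ret, entry->swpentry.val, swp_type(entry->swpentry),
		swp_offset(entry->swpentry), entry->pool->tfm_name,
		entry->length, acomp_ctx->req->dlen);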

> +			decomp_ret,
> +			entry->swpentry.val,
> +			swp_type(entry->swpentry),
> +			swp_offset(entry->swpentry),
> +			entry->pool->tfm_name,
> +			entry->length,
> +			acomp_ctx->req->dlen);
> +	}
>  
>  	if (src != acomp_ctx->buffer)
>  		zpool_unmap_handle(zpool, entry->handle);
>  	acomp_ctx_put_unlock(acomp_ctx);
> +	return ret;

Not a big deal but we could probably store the length in a local
variable and move the check here, and avoid needing 'ret'.
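
Something in this direction, perhaps (a sketch only; 'dlen' would be a
new local captured while still holding the lock, and the pr_alert
arguments are elided):

	dlen = acomp_ctx->req->dlen;

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
	acomp_ctx_put_unlock(acomp_ctx);

	if (decomp_ret || dlen != PAGE_SIZE) {
		zswap_decompress_fail++;
		pr_alert_ratelimited(...);
		return false;
	}
	return true;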

>  }
>  
>  /*********************************
> @@ -1060,6 +1077,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  	struct writeback_control wbc = {
>  		.sync_mode = WB_SYNC_NONE,
>  	};
> +	int ret = 0;
>  
>  	/* try to allocate swap cache folio */
>  	si = get_swap_device(swpentry);
> @@ -1081,8 +1099,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  	 * and freed when invalidated by the concurrent shrinker anyway.
>  	 */
>  	if (!folio_was_allocated) {
> -		folio_put(folio);
> -		return -EEXIST;
> +		ret = -EEXIST;
> +		goto put_folio;
>  	}
>  
>  	/*
> @@ -1095,14 +1113,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  	 * be dereferenced.
>  	 */
>  	tree = swap_zswap_tree(swpentry);
> -	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
> -		delete_from_swap_cache(folio);
> -		folio_unlock(folio);
> -		folio_put(folio);
> -		return -ENOMEM;
> +	if (entry != xa_load(tree, offset)) {
> +		ret = -ENOMEM;
> +		goto delete_unlock;
> +	}
> +
> +	if (!zswap_decompress(entry, folio)) {
> +		ret = -EIO;
> +		goto delete_unlock;
>  	}
>  
> -	zswap_decompress(entry, folio);
> +	xa_erase(tree, offset);
>  
>  	count_vm_event(ZSWPWB);
>  	if (entry->objcg)
> @@ -1118,9 +1139,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
>  
>  	/* start writeback */
>  	__swap_writepage(folio, &wbc);
> -	folio_put(folio);
>  
> -	return 0;
> +put_folio:
> +	folio_put(folio);
> +	return ret;
> +delete_unlock:
> +	delete_from_swap_cache(folio);
> +	folio_unlock(folio);
> +	goto put_folio;

I think I suggested a way to avoid this goto in v1:
https://lore.kernel.org/lkml/Z782SPcJI8DFISRa@google.com/.

Did this not work out?

>  }
>  
>  /*********************************
> @@ -1620,6 +1646,20 @@ bool zswap_store(struct folio *folio)
>  	return ret;
>  }
>  
> +/**
> + * zswap_load() - load a page from zswap
> + * @folio: folio to load
> + *
> + * Returns: true if zswap owns the swapped out contents, false otherwise.
> + *
> + * Note that the zswap_load() return value doesn't indicate success or failure,
> + * but whether zswap owns the swapped out contents. This MUST return true if
> + * zswap does own the swapped out contents, even if it fails to write the
> + * contents to the folio. Otherwise, the caller will try to read garbage from
> + * the backend.
> + *
> + * Success is signaled by marking the folio uptodate.
> + */
>  bool zswap_load(struct folio *folio)
>  {
>  	swp_entry_t swp = folio->swap;
> @@ -1644,6 +1684,17 @@ bool zswap_load(struct folio *folio)

The comment that exists here (not visible in the diff) should be
abbreviated now that we already explained the whole uptodate thing
above, right?

>  	if (WARN_ON_ONCE(folio_test_large(folio)))
>  		return true;
>  
> +	entry = xa_load(tree, offset);
> +	if (!entry)
> +		return false;
> +

A small comment here pointing out that we are deliberately not setting
uptodate because of the failure may make things more obvious, or do you
think that's not needed?
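
E.g. something like this (wording illustrative):

	/*
	 * Decompression failed: zswap still owns the entry, so return
	 * true, but deliberately leave the folio !uptodate so that the
	 * fault path treats this as an IO error (e.g. do_swap_page()
	 * will SIGBUS).
	 */
	if (!zswap_decompress(entry, folio))
		return true;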

> +	if (!zswap_decompress(entry, folio))
> +		return true;
> +
> +	count_vm_event(ZSWPIN);
> +	if (entry->objcg)
> +		count_objcg_events(entry->objcg, ZSWPIN, 1);
> +
>  	/*
>  	 * When reading into the swapcache, invalidate our entry. The
>  	 * swapcache can be the authoritative owner of the page and
> @@ -1656,21 +1707,8 @@ bool zswap_load(struct folio *folio)
>  	 * files, which reads into a private page and may free it if
>  	 * the fault fails. We remain the primary owner of the entry.)
>  	 */
> -	if (swapcache)
> -		entry = xa_erase(tree, offset);
> -	else
> -		entry = xa_load(tree, offset);
> -
> -	if (!entry)
> -		return false;
> -
> -	zswap_decompress(entry, folio);
> -
> -	count_vm_event(ZSWPIN);
> -	if (entry->objcg)
> -		count_objcg_events(entry->objcg, ZSWPIN, 1);
> -
>  	if (swapcache) {
> +		xa_erase(tree, offset);
>  		zswap_entry_free(entry);
>  		folio_mark_dirty(folio);
>  	}
> @@ -1771,6 +1809,8 @@ static int zswap_debugfs_init(void)
>  			   zswap_debugfs_root, &zswap_reject_compress_fail);
>  	debugfs_create_u64("reject_compress_poor", 0444,
>  			   zswap_debugfs_root, &zswap_reject_compress_poor);
> +	debugfs_create_u64("decompress_fail", 0444,
> +			   zswap_debugfs_root, &zswap_decompress_fail);
>  	debugfs_create_u64("written_back_pages", 0444,
>  			   zswap_debugfs_root, &zswap_written_back_pages);
>  	debugfs_create_file("pool_total_size", 0444,
> 
> base-commit: 598d34afeca6bb10554846cf157a3ded8729516c
> -- 
> 2.43.5
Johannes Weiner Feb. 27, 2025, 4:31 a.m. UTC | #2
On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> >  		return true;
> >  
> > +	entry = xa_load(tree, offset);
> > +	if (!entry)
> > +		return false;
> > +
> 
> A small comment here pointing out that we are deliberately not setting
> uptodate because of the failure may make things more obvious, or do you
> think that's not needed?
>
> > +	if (!zswap_decompress(entry, folio))
> > +		return true;

How about an actual -ev and have this in swap_read_folio():

        ret = zswap_load(folio);
        if (ret != -ENOENT) {
                folio_unlock(folio);
                goto finish;
        }

	read from swapfile...

Then in zswap_load(), move uptodate further up like this (I had
previously suggested this):

	if (!zswap_decompress(entry, folio))
		return -EIO;

	folio_mark_uptodate(folio);

and I think it would be clear, even without or just minimal comments.
Yosry Ahmed Feb. 27, 2025, 5:44 a.m. UTC | #3
On Wed, Feb 26, 2025 at 11:31:41PM -0500, Johannes Weiner wrote:
> On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> > >  		return true;
> > >  
> > > +	entry = xa_load(tree, offset);
> > > +	if (!entry)
> > > +		return false;
> > > +
> > 
> > A small comment here pointing out that we are deliberately not setting
> > uptodate because of the failure may make things more obvious, or do you
> > think that's not needed?
> >
> > > +	if (!zswap_decompress(entry, folio))
> > > +		return true;
> 
> How about an actual -ev and have this in swap_read_folio():

Good idea, I was going to suggest an enum but this is simpler.

> 
>         ret = zswap_load(folio);
>         if (ret != -ENOENT) {
>                 folio_unlock(folio);
>                 goto finish;
>         }
> 
> 	read from swapfile...
> 
> Then in zswap_load(), move uptodate further up like this (I had
> previously suggested this):
> 
> 	if (!zswap_decompress(entry, folio))
> 		return -EIO;
> 
> 	folio_mark_uptodate(folio);
> 
> and I think it would be clear, even without or just minimal comments.

Another possibility is moving folio_mark_uptodate() back to
swap_read_folio(), which should make things even clearer imo as the
success/failure logic is all in one place:

	ret = zswap_load(folio);
	if (ret != -ENOENT) {
		folio_unlock(folio);
		/* Comment about not marking uptodate */
		if (!ret)
			folio_mark_uptodate();
		goto finish;
	}

or we can make it crystal clear we have 3 distinct cases:

	ret = zswap_load(folio);
	if (!ret) {
		folio_unlock(folio);
		folio_mark_uptodate();
		goto finish;
	} else if (ret != -ENOENT) {
		/* Comment about not marking uptodate */
		folio_unlock(folio);
		goto finish;
	}

WDYT?
Johannes Weiner Feb. 27, 2025, 6:16 a.m. UTC | #4
On Thu, Feb 27, 2025 at 05:44:29AM +0000, Yosry Ahmed wrote:
> On Wed, Feb 26, 2025 at 11:31:41PM -0500, Johannes Weiner wrote:
> > On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> > > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> > > >  		return true;
> > > >  
> > > > +	entry = xa_load(tree, offset);
> > > > +	if (!entry)
> > > > +		return false;
> > > > +
> > > 
> > > A small comment here pointing out that we are deliberately not setting
> > > uptodate because of the failure may make things more obvious, or do you
> > > think that's not needed?
> > >
> > > > +	if (!zswap_decompress(entry, folio))
> > > > +		return true;
> > 
> > How about an actual -ev and have this in swap_read_folio():
> 
> Good idea, I was going to suggest an enum but this is simpler.
> 
> > 
> >         ret = zswap_load(folio);
> >         if (ret != -ENOENT) {
> >                 folio_unlock(folio);
> >                 goto finish;
> >         }
> > 
> > 	read from swapfile...
> > 
> > Then in zswap_load(), move uptodate further up like this (I had
> > previously suggested this):
> > 
> > 	if (!zswap_decompress(entry, folio))
> > 		return -EIO;
> > 
> > 	folio_mark_uptodate(folio);
> > 
> > and I think it would be clear, even without or just minimal comments.
> 
> Another possibility is moving folio_mark_uptodate() back to
> swap_read_folio(), which should make things even clearer imo as the
> success/failure logic is all in one place:

That works. bdev, swapfile and zeromap set the flag in that file.

> 	ret = zswap_load(folio);
> 	if (ret != -ENOENT) {
> 		folio_unlock(folio);
> 		/* Comment about not marking uptodate */
> 		if (!ret)
> 			folio_mark_uptodate();
> 		goto finish;
> 	}

Personally, I like this one ^. The comment isn't needed IMO, as now
zswap really isn't doing anything special compared to the others.

> or we can make it crystal clear we have 3 distinct cases:
> 
> 	ret = zswap_load(folio);
> 	if (!ret) {
> 		folio_unlock(folio);
> 		folio_mark_uptodate();
> 		goto finish;
> 	} else if (ret != -ENOENT) {
> 		/* Comment about not marking uptodate */
> 		folio_unlock(folio);
> 		goto finish;
> 	}

This seems unnecessarily repetitive.
Yosry Ahmed Feb. 27, 2025, 7:11 a.m. UTC | #5
On Thu, Feb 27, 2025 at 01:16:16AM -0500, Johannes Weiner wrote:
> On Thu, Feb 27, 2025 at 05:44:29AM +0000, Yosry Ahmed wrote:
> > On Wed, Feb 26, 2025 at 11:31:41PM -0500, Johannes Weiner wrote:
> > > On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> > > > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > > >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> > > > >  		return true;
> > > > >  
> > > > > +	entry = xa_load(tree, offset);
> > > > > +	if (!entry)
> > > > > +		return false;
> > > > > +
> > > > 
> > > > A small comment here pointing out that we are deliberatly not setting
> > > > uptodate because of the failure may make things more obvious, or do you
> > > > think that's not needed?
> > > >
> > > > > +	if (!zswap_decompress(entry, folio))
> > > > > +		return true;
> > > 
> > > How about an actual -ev and have this in swap_read_folio():
> > 
> > Good idea, I was going to suggest an enum but this is simpler.
> > 
> > > 
> > >         ret = zswap_load(folio);
> > >         if (ret != -ENOENT) {
> > >                 folio_unlock(folio);
> > >                 goto finish;
> > >         }
> > > 
> > > 	read from swapfile...
> > > 
> > > Then in zswap_load(), move uptodate further up like this (I had
> > > previously suggested this):
> > > 
> > > 	if (!zswap_decompress(entry, folio))
> > > 		return -EIO;
> > > 
> > > 	folio_mark_uptodate(folio);
> > > 
> > > and I think it would be clear, even without or just minimal comments.
> > 
> > Another possibility is moving folio_mark_uptodate() back to
> > swap_read_folio(), which should make things even clearer imo as the
> > success/failure logic is all in one place:
> 
> That works. bdev, swapfile and zeromap set the flag in that file.
> 
> > 	ret = zswap_load(folio);
> > 	if (ret != -ENOENT) {
> > 		folio_unlock(folio);
> > 		/* Comment about not marking uptodate */
> > 		if (!ret)
> > 			folio_mark_uptodate();
> > 		goto finish;
> > 	}
> 
> Personally, I like this one ^. The comment isn't needed IMO, as now
> zswap really isn't doing anything special compared to the others.
> 
> > or we can make it crystal clear we have 3 distinct cases:
> > 
> > 	ret = zswap_load(folio);
> > 	if (!ret) {
> > 		folio_unlock(folio);
> > 		folio_mark_uptodate();
> > 		goto finish;
> > 	} else if (ret != -ENOENT) {
> > 		/* Comment about not marking uptodate */
> > 		folio_unlock(folio);
> > 		goto finish;
> > 	}
> 
> This seems unnecessarily repetitive.

I know, but looking at the two, this one makes it clearer to me there
are 3 distinct cases, and the redundancy is not terrible.

So I personally prefer the latter, but I am fine either way.
Yosry Ahmed Feb. 27, 2025, 7:29 a.m. UTC | #6
On Thu, Feb 27, 2025 at 07:11:59AM +0000, Yosry Ahmed wrote:
> On Thu, Feb 27, 2025 at 01:16:16AM -0500, Johannes Weiner wrote:
> > On Thu, Feb 27, 2025 at 05:44:29AM +0000, Yosry Ahmed wrote:
> > > On Wed, Feb 26, 2025 at 11:31:41PM -0500, Johannes Weiner wrote:
> > > > On Thu, Feb 27, 2025 at 01:19:31AM +0000, Yosry Ahmed wrote:
> > > > > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > > > >  	if (WARN_ON_ONCE(folio_test_large(folio)))
> > > > > >  		return true;
> > > > > >  
> > > > > > +	entry = xa_load(tree, offset);
> > > > > > +	if (!entry)
> > > > > > +		return false;
> > > > > > +
> > > > > 
> > > > > A small comment here pointing out that we are deliberately not setting
> > > > > uptodate because of the failure may make things more obvious, or do you
> > > > > think that's not needed?
> > > > >
> > > > > > +	if (!zswap_decompress(entry, folio))
> > > > > > +		return true;
> > > > 
> > > > How about an actual -ev and have this in swap_read_folio():
> > > 
> > > Good idea, I was going to suggest an enum but this is simpler.
> > > 
> > > > 
> > > >         ret = zswap_load(folio);
> > > >         if (ret != -ENOENT) {
> > > >                 folio_unlock(folio);
> > > >                 goto finish;
> > > >         }
> > > > 
> > > > 	read from swapfile...
> > > > 
> > > > Then in zswap_load(), move uptodate further up like this (I had
> > > > previously suggested this):
> > > > 
> > > > 	if (!zswap_decompress(entry, folio))
> > > > 		return -EIO;
> > > > 
> > > > 	folio_mark_uptodate(folio);
> > > > 
> > > > and I think it would be clear, even without or just minimal comments.
> > > 
> > > Another possibility is moving folio_mark_uptodate() back to
> > > swap_read_folio(), which should make things even clearer imo as the
> > > success/failure logic is all in one place:
> > 
> > That works. bdev, swapfile and zeromap set the flag in that file.
> > 
> > > 	ret = zswap_load(folio);
> > > 	if (ret != -ENOENT) {
> > > 		folio_unlock(folio);
> > > 		/* Comment about not marking uptodate */
> > > 		if (!ret)
> > > 			folio_mark_uptodate();
> > > 		goto finish;
> > > 	}
> > 
> > Personally, I like this one ^. The comment isn't needed IMO, as now
> > zswap really isn't doing anything special compared to the others.
> > 
> > > or we can make it crystal clear we have 3 distinct cases:
> > > 
> > > 	ret = zswap_load(folio);
> > > 	if (!ret) {
> > > 		folio_unlock(folio);
> > > 		folio_mark_uptodate();
> > > 		goto finish;
> > > 	} else if (ret != -ENOENT) {
> > > 		/* Comment about not marking uptodate */
> > > 		folio_unlock(folio);
> > > 		goto finish;
> > > 	}
> > 
> > This seems unnecessarily repetitive.
> 
> I know, but looking at the two, this one makes it clearer to me there
> are 3 distinct cases, and the redundancy is not terrible.
> 
> So I personally prefer the latter, but I am fine either way.

I just realized that swap_read_folio_zeromap() does the same trick, so
we should probably also move the folio_mark_uptodate() in there to
swap_read_folio().

Maybe we can do something like this:

/* Returns true if the folio was in the zeromap or zswap */
bool swap_read_folio_in_memory(struct folio *folio)
{
	int ret;

	ret = swap_read_folio_zeromap(folio);
	if (ret == -ENOENT)
		ret = zswap_load(folio);

	if (ret == 0) {
		folio_mark_uptodate(folio);
		folio_unlock(folio);
		return true;
	} else if (ret != -ENOENT) {
		folio_unlock(folio);
		return true;
	} else {
		return false;
	}
}

void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	...
	if (swap_read_folio_in_memory(folio))
		goto finish;
	...
}

Admittedly, swap_read_folio_in_memory() is not a good name. Maybe
swap_read_folio_zeromap_or_zswap() :)
Johannes Weiner Feb. 27, 2025, 4:05 p.m. UTC | #7
On Thu, Feb 27, 2025 at 07:29:45AM +0000, Yosry Ahmed wrote:
> Maybe we can do something like this:
> 
> /* Returns true if the folio was in the zeromap or zswap */
> bool swap_read_folio_in_memory(struct folio *folio)
> {
> 	int ret;
> 
> 	ret = swap_read_folio_zeromap(folio);
> 	if (ret == -ENOENT)
> 		ret = zswap_load(folio);
> 
> 	if (ret == 0) {
> 		folio_mark_uptodate(folio);
> 		folio_unlock(folio);
> 		return true;
> 	} else if (ret != -ENOENT) {
> 		folio_unlock(folio);
> 		return true;
> 	} else {
> 		return false;
> 	}
> }

Eh, I think we're getting colder again.

This looks repetitive, zswap_load() is kind of awkward in that error
leg, and combining the two into one function is a bit of a stretch.

There is also something to be said about folio_mark_uptodate() and
folio_unlock() usually being done by the backend implementation - in
what the page cache would call the "filler" method - to signal when
it's done reading, and what the outcome was.

E.g. for fs it's always in the specific ->read implementation:

static int simple_read_folio(struct file *file, struct folio *folio)
{
	folio_zero_range(folio, 0, folio_size(folio));
	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

and not in the generic manifold:

$ grep -c folio_mark_uptodate mm/filemap.c 
0

I'd actually rather push those down into zeromap and zswap as well to
follow that pattern more closely:

diff --git a/mm/page_io.c b/mm/page_io.c
index 9b983de351f9..1fb5ce1884bd 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -538,6 +538,7 @@ static bool swap_read_folio_zeromap(struct folio *folio)
 
 	folio_zero_range(folio, 0, folio_size(folio));
 	folio_mark_uptodate(folio);
+	folio_unlock(folio);
 	return true;
 }
 
@@ -635,13 +636,11 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
 	}
 	delayacct_swapin_start();
 
-	if (swap_read_folio_zeromap(folio)) {
-		folio_unlock(folio);
+	if (swap_read_folio_zeromap(folio))
 		goto finish;
-	} else if (zswap_load(folio)) {
-		folio_unlock(folio);
+
+	if (zswap_load(folio) != -ENOENT)
 		goto finish;
-	}
 
 	/* We have to read from slower devices. Increase zswap protection. */
 	zswap_folio_swapin(folio);
diff --git a/mm/zswap.c b/mm/zswap.c
index 6dbf31bd2218..76b2a964b0cd 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1620,7 +1620,7 @@ bool zswap_store(struct folio *folio)
 	return ret;
 }
 
-bool zswap_load(struct folio *folio)
+int zswap_load(struct folio *folio)
 {
 	swp_entry_t swp = folio->swap;
 	pgoff_t offset = swp_offset(swp);
@@ -1631,7 +1631,7 @@ bool zswap_load(struct folio *folio)
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
 	if (zswap_never_enabled())
-		return false;
+		return -ENOENT;
 
 	/*
 	 * Large folios should not be swapped in while zswap is being used, as
@@ -1641,8 +1641,25 @@ bool zswap_load(struct folio *folio)
 	 * Return true without marking the folio uptodate so that an IO error is
 	 * emitted (e.g. do_swap_page() will sigbus).
 	 */
-	if (WARN_ON_ONCE(folio_test_large(folio)))
-		return true;
+	if (WARN_ON_ONCE(folio_test_large(folio))) {
+		folio_unlock(folio);
+		return -EINVAL;
+	}
+
+	entry = xa_load(tree, offset);
+	if (!entry)
+		return -ENOENT;
+
+	if (!zswap_decompress(entry, folio)) {
+		folio_unlock(folio);
+		return -EIO;
+	}
+
+	folio_mark_uptodate(folio);
+
+	count_vm_event(ZSWPIN);
+	if (entry->objcg)
+		count_objcg_events(entry->objcg, ZSWPIN, 1);
 
 	/*
 	 * When reading into the swapcache, invalidate our entry. The
@@ -1656,27 +1673,14 @@ bool zswap_load(struct folio *folio)
 	 * files, which reads into a private page and may free it if
 	 * the fault fails. We remain the primary owner of the entry.)
 	 */
-	if (swapcache)
-		entry = xa_erase(tree, offset);
-	else
-		entry = xa_load(tree, offset);
-
-	if (!entry)
-		return false;
-
-	zswap_decompress(entry, folio);
-
-	count_vm_event(ZSWPIN);
-	if (entry->objcg)
-		count_objcg_events(entry->objcg, ZSWPIN, 1);
-
 	if (swapcache) {
-		zswap_entry_free(entry);
 		folio_mark_dirty(folio);
+		xa_erase(tree, offset);
+		zswap_entry_free(entry);
 	}
 
-	folio_mark_uptodate(folio);
-	return true;
+	folio_unlock(folio);
+	return 0;
 }
 
 void zswap_invalidate(swp_entry_t swp)
Yosry Ahmed Feb. 27, 2025, 6:01 p.m. UTC | #8
On Thu, Feb 27, 2025 at 11:05:28AM -0500, Johannes Weiner wrote:
> On Thu, Feb 27, 2025 at 07:29:45AM +0000, Yosry Ahmed wrote:
> > Maybe we can do something like this:
> > 
> > /* Returns true if the folio was in the zeromap or zswap */
> > bool swap_read_folio_in_memory(struct folio *folio)
> > {
> > 	int ret;
> > 
> > 	ret = swap_read_folio_zeromap(folio);
> > 	if (ret == -ENOENT)
> > 		ret = zswap_load(folio);
> > 
> > 	if (ret == 0) {
> > 		folio_mark_uptodate(folio);
> > 		folio_unlock(folio);
> > 		return true;
> > 	} else if (ret != -ENOENT) {
> > 		folio_unlock(folio);
> > 		return true;
> > 	} else {
> > 		return false;
> > 	}
> > }
> 
> Eh, I think we're getting colder again.
> 
> This looks repetitive, zswap_load() is kind of awkward in that error
> leg, and combining the two into one function is a bit of a stretch.
> 
> There is also something to be said about folio_mark_uptodate() and
> folio_unlock() usually being done by the backend implementation - in
> what the page cache would call the "filler" method - to signal when
> it's done reading, and what the outcome was.
> 
> E.g. for fs it's always in the specific ->read implementation:
> 
> static int simple_read_folio(struct file *file, struct folio *folio)
> {
> 	folio_zero_range(folio, 0, folio_size(folio));
> 	flush_dcache_folio(folio);
> 	folio_mark_uptodate(folio);
> 	folio_unlock(folio);
> 	return 0;
> }
> 
> and not in the generic manifold:
> 
> $ grep -c folio_mark_uptodate mm/filemap.c 
> 0
> 
> I'd actually rather push those down into zeromap and zswap as well to
> follow that pattern more closely:

Hmm, yeah, for the sake of consistency I think we can do that. My main
concern was needing comments in a lot of places in the zswap and
zeromap code to clarify that a 'true' return value without marking the
folio uptodate means failure. However, I think returning an error code
as you suggested makes it more obvious and reduces the need for
comments.

So yes I think your proposed approach is better (with a few comments).

> 
> diff --git a/mm/page_io.c b/mm/page_io.c
> index 9b983de351f9..1fb5ce1884bd 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -538,6 +538,7 @@ static bool swap_read_folio_zeromap(struct folio *folio)
>  
>  	folio_zero_range(folio, 0, folio_size(folio));
>  	folio_mark_uptodate(folio);
> +	folio_unlock(folio);
>  	return true;

So I think we should double down and follow the same pattern for
swap_read_folio_zeromap() too: return an error code there as well, to
clarify the case where we skip marking the folio uptodate.
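
I.e. something along these lines (a sketch; the zero-filling and
accounting details are elided):

	static int swap_read_folio_zeromap(struct folio *folio)
	{
		...
		if (!is_zeromap)
			return -ENOENT;

		folio_zero_range(folio, 0, folio_size(folio));
		folio_mark_uptodate(folio);
		folio_unlock(folio);
		return 0;
	}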

>  }
>  
> @@ -635,13 +636,11 @@ void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
>  	}
>  	delayacct_swapin_start();
>  
> -	if (swap_read_folio_zeromap(folio)) {
> -		folio_unlock(folio);
> +	if (swap_read_folio_zeromap(folio))
>  		goto finish;

and this ends up being closer to the zswap pattern:

	if (swap_read_folio_zeromap(folio) != -ENOENT)
		goto finish;

> -	} else if (zswap_load(folio)) {
> -		folio_unlock(folio);
> +
> +	if (zswap_load(folio) != -ENOENT)
>  		goto finish;
> -	}
>  
>  	/* We have to read from slower devices. Increase zswap protection. */
>  	zswap_folio_swapin(folio);
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 6dbf31bd2218..76b2a964b0cd 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -1620,7 +1620,7 @@ bool zswap_store(struct folio *folio)
>  	return ret;
>  }
>  

And here we should explain the different return codes and when we return
with the folio locked/unlocked or marked uptodate.

> -bool zswap_load(struct folio *folio)
> +int zswap_load(struct folio *folio)
>  {
>  	swp_entry_t swp = folio->swap;
>  	pgoff_t offset = swp_offset(swp);
> @@ -1631,7 +1631,7 @@ bool zswap_load(struct folio *folio)
>  	VM_WARN_ON_ONCE(!folio_test_locked(folio));
>  
>  	if (zswap_never_enabled())
> -		return false;
> +		return -ENOENT;
>  
>  	/*
>  	 * Large folios should not be swapped in while zswap is being used, as
> @@ -1641,8 +1641,25 @@ bool zswap_load(struct folio *folio)
>  	 * Return true without marking the folio uptodate so that an IO error is
>  	 * emitted (e.g. do_swap_page() will sigbus).
>  	 */
> -	if (WARN_ON_ONCE(folio_test_large(folio)))
> -		return true;
> +	if (WARN_ON_ONCE(folio_test_large(folio))) {
> +		folio_unlock(folio);
> +		return -EINVAL;
> +	}
> +
> +	entry = xa_load(tree, offset);
> +	if (!entry)
> +		return -ENOENT;
> +
> +	if (!zswap_decompress(entry, folio)) {
> +		folio_unlock(folio);
> +		return -EIO;
> +	}
> +
> +	folio_mark_uptodate(folio);
> +
> +	count_vm_event(ZSWPIN);
> +	if (entry->objcg)
> +		count_objcg_events(entry->objcg, ZSWPIN, 1);
>  
>  	/*
>  	 * When reading into the swapcache, invalidate our entry. The
> @@ -1656,27 +1673,14 @@ bool zswap_load(struct folio *folio)
>  	 * files, which reads into a private page and may free it if
>  	 * the fault fails. We remain the primary owner of the entry.)
>  	 */
> -	if (swapcache)
> -		entry = xa_erase(tree, offset);
> -	else
> -		entry = xa_load(tree, offset);
> -
> -	if (!entry)
> -		return false;
> -
> -	zswap_decompress(entry, folio);
> -
> -	count_vm_event(ZSWPIN);
> -	if (entry->objcg)
> -		count_objcg_events(entry->objcg, ZSWPIN, 1);
> -
>  	if (swapcache) {
> -		zswap_entry_free(entry);
>  		folio_mark_dirty(folio);
> +		xa_erase(tree, offset);
> +		zswap_entry_free(entry);
>  	}
>  
> -	folio_mark_uptodate(folio);
> -	return true;
> +	folio_unlock(folio);
> +	return 0;
>  }
>  
>  void zswap_invalidate(swp_entry_t swp)
Nhat Pham Feb. 27, 2025, 9:46 p.m. UTC | #9
On Wed, Feb 26, 2025 at 5:19 PM Yosry Ahmed <yosry.ahmed@linux.dev> wrote:
>
> On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > Currently, we crash the kernel when a decompression failure occurs in
> > zswap (either because of memory corruption, or a bug in the compression
> > algorithm). This is overkill. We should only SIGBUS the unfortunate
> > process asking for the zswap entry on zswap load, and skip the corrupted
> > entry in zswap writeback. The former is accomplished by returning true
> > from zswap_load(), indicating that zswap owns the swapped out content,
> > but without flagging the folio as up-to-date. The process trying to swap
> > in the page will check for the uptodate folio flag and SIGBUS (see
> > do_swap_page() in mm/memory.c for more details).
>
> We should call out the extra xarray walks and their perf impact (if
> any).

Lemme throw this in a quick and dirty test. I doubt there's any
impact, but since I'm reworking this patch for a third version anyway
might as well.

>
> >
> > See [1] for a recent upstream discussion about this.
> >
> > [1]: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/
> >
> > Suggested-by: Matthew Wilcox <willy@infradead.org>
> > Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> > Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> > ---
> >  mm/zswap.c | 94 ++++++++++++++++++++++++++++++++++++++----------------
> >  1 file changed, 67 insertions(+), 27 deletions(-)
> >
> > diff --git a/mm/zswap.c b/mm/zswap.c
> > index 6dbf31bd2218..e4a2157bbc64 100644
> > --- a/mm/zswap.c
> > +++ b/mm/zswap.c
> > @@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail;
> >  static u64 zswap_reject_compress_fail;
> >  /* Compressed page was too big for the allocator to (optimally) store */
> >  static u64 zswap_reject_compress_poor;
> > +/* Load or writeback failed due to decompression failure */
> > +static u64 zswap_decompress_fail;
> >  /* Store failed because underlying allocator could not get memory */
> >  static u64 zswap_reject_alloc_fail;
> >  /* Store failed because the entry metadata could not be allocated (rare) */
> > @@ -996,11 +998,13 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> >       return comp_ret == 0 && alloc_ret == 0;
> >  }
> >
> > -static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> > +static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> >  {
> >       struct zpool *zpool = entry->pool->zpool;
> >       struct scatterlist input, output;
> >       struct crypto_acomp_ctx *acomp_ctx;
> > +     int decomp_ret;
> > +     bool ret = true;
> >       u8 *src;
> >
> >       acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
> > @@ -1025,12 +1029,25 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> >       sg_init_table(&output, 1);
> >       sg_set_folio(&output, folio, PAGE_SIZE, 0);
> >       acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
> > -     BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
> > -     BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
> > +     decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
> > +     if (decomp_ret || acomp_ctx->req->dlen != PAGE_SIZE) {
> > +             ret = false;
> > +             zswap_decompress_fail++;
> > +             pr_alert_ratelimited(
> > +                     "decompression failed with returned value %d on zswap entry with swap entry value %08lx, swap type %d, and swap offset %lu. compression algorithm is %s. compressed size is %u bytes, and decompressed size is %u bytes.\n",
>
> This is a very long line. I think we should break it into multiple
> lines. I know multiline strings are frowned upon by checkpatch, but these
> exist (see the warning in mem_cgroup_oom_control_write() for example),
> and they are definitely better than a very long line imo.

My personal take is that I prefer multiline strings, but I was not sure
what the "preferred" or "official" style is. Oh well.

>
> > +                     decomp_ret,
> > +                     entry->swpentry.val,
> > +                     swp_type(entry->swpentry),
> > +                     swp_offset(entry->swpentry),
> > +                     entry->pool->tfm_name,
> > +                     entry->length,
> > +                     acomp_ctx->req->dlen);
> > +     }
> >
> >       if (src != acomp_ctx->buffer)
> >               zpool_unmap_handle(zpool, entry->handle);
> >       acomp_ctx_put_unlock(acomp_ctx);
> > +     return ret;
>
> Not a big deal but we could probably store the length in a local
> variable and move the check here, and avoid needing 'ret'.

Ah, the suggestion you made in an older version, right? But it sounds
like we're just trading one local variable for another?

That said, it *technically* moves some work outside of the lock
section. I'll just give it a try :)

>
> >  }
> >
> >  /*********************************
> > @@ -1060,6 +1077,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> >       struct writeback_control wbc = {
> >               .sync_mode = WB_SYNC_NONE,
> >       };
> > +     int ret = 0;
> >
> >       /* try to allocate swap cache folio */
> >       si = get_swap_device(swpentry);
> > @@ -1081,8 +1099,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> >        * and freed when invalidated by the concurrent shrinker anyway.
> >        */
> >       if (!folio_was_allocated) {
> > -             folio_put(folio);
> > -             return -EEXIST;
> > +             ret = -EEXIST;
> > +             goto put_folio;
> >       }
> >
> >       /*
> > @@ -1095,14 +1113,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> >        * be dereferenced.
> >        */
> >       tree = swap_zswap_tree(swpentry);
> > -     if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
> > -             delete_from_swap_cache(folio);
> > -             folio_unlock(folio);
> > -             folio_put(folio);
> > -             return -ENOMEM;
> > +     if (entry != xa_load(tree, offset)) {
> > +             ret = -ENOMEM;
> > +             goto delete_unlock;
> > +     }
> > +
> > +     if (!zswap_decompress(entry, folio)) {
> > +             ret = -EIO;
> > +             goto delete_unlock;
> >       }
> >
> > -     zswap_decompress(entry, folio);
> > +     xa_erase(tree, offset);
> >
> >       count_vm_event(ZSWPWB);
> >       if (entry->objcg)
> > @@ -1118,9 +1139,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> >
> >       /* start writeback */
> >       __swap_writepage(folio, &wbc);
> > -     folio_put(folio);
> >
> > -     return 0;
> > +put_folio:
> > +     folio_put(folio);
> > +     return ret;
> > +delete_unlock:
> > +     delete_from_swap_cache(folio);
> > +     folio_unlock(folio);
> > +     goto put_folio;
>
> I think I suggested a way to avoid this goto in v1:
> https://lore.kernel.org/lkml/Z782SPcJI8DFISRa@google.com/.
>
> Did this not work out?

Oh, I thought your suggestion was the same as Johannes'. Let me take a
closer look...

>
> >  }
> >
> >  /*********************************
> > @@ -1620,6 +1646,20 @@ bool zswap_store(struct folio *folio)
> >       return ret;
> >  }
> >
> > +/**
> > + * zswap_load() - load a page from zswap
> > + * @folio: folio to load
> > + *
> > + * Returns: true if zswap owns the swapped out contents, false otherwise.
> > + *
> > + * Note that the zswap_load() return value doesn't indicate success or failure,
> > + * but whether zswap owns the swapped out contents. This MUST return true if
> > + * zswap does own the swapped out contents, even if it fails to write the
> > + * contents to the folio. Otherwise, the caller will try to read garbage from
> > + * the backend.
> > + *
> > + * Success is signaled by marking the folio uptodate.
> > + */
> >  bool zswap_load(struct folio *folio)
> >  {
> >       swp_entry_t swp = folio->swap;
> > @@ -1644,6 +1684,17 @@ bool zswap_load(struct folio *folio)
>
> The comment that exists here (not visible in the diff) should be
> abbreviated now that we already explained the whole uptodate thing
> above, right?

Lemme take a stab at it :)

>
> >       if (WARN_ON_ONCE(folio_test_large(folio)))
> >               return true;
> >
> > +     entry = xa_load(tree, offset);
> > +     if (!entry)
> > +             return false;
> > +
>
> A small comment here pointing out that we are deliberately not setting
> uptodate because of the failure may make things more obvious, or do you
> think that's not needed?

I was thinking the comment above zswap_load() should be sufficient, but
a small comment here won't hurt.

>
> > +     if (!zswap_decompress(entry, folio))
> > +             return true;
> > +
> > +     count_vm_event(ZSWPIN);
> > +     if (entry->objcg)
> > +             count_objcg_events(entry->objcg, ZSWPIN, 1);
> > +
> >       /*
> >        * When reading into the swapcache, invalidate our entry. The
> >        * swapcache can be the authoritative owner of the page and
> > @@ -1656,21 +1707,8 @@ bool zswap_load(struct folio *folio)
> >        * files, which reads into a private page and may free it if
> >        * the fault fails. We remain the primary owner of the entry.)
> >        */
> > -     if (swapcache)
> > -             entry = xa_erase(tree, offset);
> > -     else
> > -             entry = xa_load(tree, offset);
> > -
> > -     if (!entry)
> > -             return false;
> > -
> > -     zswap_decompress(entry, folio);
> > -
> > -     count_vm_event(ZSWPIN);
> > -     if (entry->objcg)
> > -             count_objcg_events(entry->objcg, ZSWPIN, 1);
> > -
> >       if (swapcache) {
> > +             xa_erase(tree, offset);
> >               zswap_entry_free(entry);
> >               folio_mark_dirty(folio);
> >       }
> > @@ -1771,6 +1809,8 @@ static int zswap_debugfs_init(void)
> >                          zswap_debugfs_root, &zswap_reject_compress_fail);
> >       debugfs_create_u64("reject_compress_poor", 0444,
> >                          zswap_debugfs_root, &zswap_reject_compress_poor);
> > +     debugfs_create_u64("decompress_fail", 0444,
> > +                        zswap_debugfs_root, &zswap_decompress_fail);
> >       debugfs_create_u64("written_back_pages", 0444,
> >                          zswap_debugfs_root, &zswap_written_back_pages);
> >       debugfs_create_file("pool_total_size", 0444,
> >
> > base-commit: 598d34afeca6bb10554846cf157a3ded8729516c
> > --
> > 2.43.5
Yosry Ahmed Feb. 27, 2025, 9:55 p.m. UTC | #10
On Thu, Feb 27, 2025 at 01:46:29PM -0800, Nhat Pham wrote:
> On Wed, Feb 26, 2025 at 5:19 PM Yosry Ahmed <yosry.ahmed@linux.dev> wrote:
> >
> > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > Currently, we crash the kernel when a decompression failure occurs in
> > > zswap (either because of memory corruption, or a bug in the compression
> > > algorithm). This is overkill. We should only SIGBUS the unfortunate
> > > process asking for the zswap entry on zswap load, and skip the corrupted
> > > entry in zswap writeback. The former is accomplished by returning true
> > > from zswap_load(), indicating that zswap owns the swapped out content,
> > > but without flagging the folio as up-to-date. The process trying to swap
> > > in the page will check for the uptodate folio flag and SIGBUS (see
> > > do_swap_page() in mm/memory.c for more details).
> >
> > We should call out the extra xarray walks and their perf impact (if
> > any).
> 
> Lemme throw this in a quick and dirty test. I doubt there's any
> impact, but since I'm reworking this patch for a third version anyway
> might as well.

It's likely everything is cache hot and the impact is minimal, but let's
do the due diligence.

> 
> >
> > >
> > > See [1] for a recent upstream discussion about this.
> > >
> > > [1]: https://lore.kernel.org/all/ZsiLElTykamcYZ6J@casper.infradead.org/
> > >
> > > Suggested-by: Matthew Wilcox <willy@infradead.org>
> > > Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> > > Signed-off-by: Nhat Pham <nphamcs@gmail.com>
> > > ---
> > >  mm/zswap.c | 94 ++++++++++++++++++++++++++++++++++++++----------------
> > >  1 file changed, 67 insertions(+), 27 deletions(-)
> > >
> > > diff --git a/mm/zswap.c b/mm/zswap.c
> > > index 6dbf31bd2218..e4a2157bbc64 100644
> > > --- a/mm/zswap.c
> > > +++ b/mm/zswap.c
> > > @@ -62,6 +62,8 @@ static u64 zswap_reject_reclaim_fail;
> > >  static u64 zswap_reject_compress_fail;
> > >  /* Compressed page was too big for the allocator to (optimally) store */
> > >  static u64 zswap_reject_compress_poor;
> > > +/* Load or writeback failed due to decompression failure */
> > > +static u64 zswap_decompress_fail;
> > >  /* Store failed because underlying allocator could not get memory */
> > >  static u64 zswap_reject_alloc_fail;
> > >  /* Store failed because the entry metadata could not be allocated (rare) */
> > > @@ -996,11 +998,13 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > >       return comp_ret == 0 && alloc_ret == 0;
> > >  }
> > >
> > > -static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> > > +static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> > >  {
> > >       struct zpool *zpool = entry->pool->zpool;
> > >       struct scatterlist input, output;
> > >       struct crypto_acomp_ctx *acomp_ctx;
> > > +     int decomp_ret;
> > > +     bool ret = true;
> > >       u8 *src;
> > >
> > >       acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
> > > @@ -1025,12 +1029,25 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
> > >       sg_init_table(&output, 1);
> > >       sg_set_folio(&output, folio, PAGE_SIZE, 0);
> > >       acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
> > > -     BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
> > > -     BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
> > > +     decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
> > > +     if (decomp_ret || acomp_ctx->req->dlen != PAGE_SIZE) {
> > > +             ret = false;
> > > +             zswap_decompress_fail++;
> > > +             pr_alert_ratelimited(
> > > +                     "decompression failed with returned value %d on zswap entry with swap entry value %08lx, swap type %d, and swap offset %lu. compression algorithm is %s. compressed size is %u bytes, and decompressed size is %u bytes.\n",
> >
> > This is a very long line. I think we should break it into multiple
> > lines. I know multiline strings are frowned upon by checkpatch, but these
> > exist (see the warning in mem_cgroup_oom_control_write() for example),
> > and they are definitely better than a very long line imo.
> 
> My personal take is that I prefer multiline strings, but I was not sure
> what the "preferred" or "official" style is. Oh well.
> 
> >
> > > +                     decomp_ret,
> > > +                     entry->swpentry.val,
> > > +                     swp_type(entry->swpentry),
> > > +                     swp_offset(entry->swpentry),
> > > +                     entry->pool->tfm_name,
> > > +                     entry->length,
> > > +                     acomp_ctx->req->dlen);
> > > +     }
> > >
> > >       if (src != acomp_ctx->buffer)
> > >               zpool_unmap_handle(zpool, entry->handle);
> > >       acomp_ctx_put_unlock(acomp_ctx);
> > > +     return ret;
> >
> > Not a big deal but we could probably store the length in a local
> > variable and move the check here, and avoid needing 'ret'.
> 
> Ah, the suggestion you made in an older version, right? But it sounds
> like we're just trading one local variable for another?
> 
> That said, it *technically* moves some work outside of the lock
> section. I'll just give it a try :)

My goal is not really to get rid of the local variable, but rather to
return 'true' or 'false' directly, which is more obvious than returning
a 'ret' value.

> 
> >
> > >  }
> > >
> > >  /*********************************
> > > @@ -1060,6 +1077,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> > >       struct writeback_control wbc = {
> > >               .sync_mode = WB_SYNC_NONE,
> > >       };
> > > +     int ret = 0;
> > >
> > >       /* try to allocate swap cache folio */
> > >       si = get_swap_device(swpentry);
> > > @@ -1081,8 +1099,8 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> > >        * and freed when invalidated by the concurrent shrinker anyway.
> > >        */
> > >       if (!folio_was_allocated) {
> > > -             folio_put(folio);
> > > -             return -EEXIST;
> > > +             ret = -EEXIST;
> > > +             goto put_folio;
> > >       }
> > >
> > >       /*
> > > @@ -1095,14 +1113,17 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> > >        * be dereferenced.
> > >        */
> > >       tree = swap_zswap_tree(swpentry);
> > > -     if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
> > > -             delete_from_swap_cache(folio);
> > > -             folio_unlock(folio);
> > > -             folio_put(folio);
> > > -             return -ENOMEM;
> > > +     if (entry != xa_load(tree, offset)) {
> > > +             ret = -ENOMEM;
> > > +             goto delete_unlock;
> > > +     }
> > > +
> > > +     if (!zswap_decompress(entry, folio)) {
> > > +             ret = -EIO;
> > > +             goto delete_unlock;
> > >       }
> > >
> > > -     zswap_decompress(entry, folio);
> > > +     xa_erase(tree, offset);
> > >
> > >       count_vm_event(ZSWPWB);
> > >       if (entry->objcg)
> > > @@ -1118,9 +1139,14 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
> > >
> > >       /* start writeback */
> > >       __swap_writepage(folio, &wbc);
> > > -     folio_put(folio);
> > >
> > > -     return 0;
> > > +put_folio:
> > > +     folio_put(folio);
> > > +     return ret;
> > > +delete_unlock:
> > > +     delete_from_swap_cache(folio);
> > > +     folio_unlock(folio);
> > > +     goto put_folio;
> >
> > I think I suggested a way to avoid this goto in v1:
> > https://lore.kernel.org/lkml/Z782SPcJI8DFISRa@google.com/.
> >
> > Did this not work out?
> 
> Oh, I thought your suggestion was the same as Johannes'. Let me take a
> closer look...
> 
> >
> > >  }
> > >
> > >  /*********************************
> > > @@ -1620,6 +1646,20 @@ bool zswap_store(struct folio *folio)
> > >       return ret;
> > >  }
> > >
> > > +/**
> > > + * zswap_load() - load a page from zswap
> > > + * @folio: folio to load
> > > + *
> > > + * Returns: true if zswap owns the swapped out contents, false otherwise.
> > > + *
> > > + * Note that the zswap_load() return value doesn't indicate success or failure,
> > > + * but whether zswap owns the swapped out contents. This MUST return true if
> > > + * zswap does own the swapped out contents, even if it fails to write the
> > > + * contents to the folio. Otherwise, the caller will try to read garbage from
> > > + * the backend.
> > > + *
> > > + * Success is signaled by marking the folio uptodate.
> > > + */
> > >  bool zswap_load(struct folio *folio)
> > >  {
> > >       swp_entry_t swp = folio->swap;
> > > @@ -1644,6 +1684,17 @@ bool zswap_load(struct folio *folio)
> >
> > The comment that exists here (not visible in the diff) should be
> > abbreviated now that we already explained the whole uptodate thing
> > above, right?
> 
> Lemme take a stab at it :)

Take a look at the other thread between Johannes and me first. We
discussed more involved changes around this.
Nhat Pham Feb. 27, 2025, 10:35 p.m. UTC | #11
On Thu, Feb 27, 2025 at 8:05 AM Johannes Weiner <hannes@cmpxchg.org> wrote:
>
> On Thu, Feb 27, 2025 at 07:29:45AM +0000, Yosry Ahmed wrote:

I like this actually,

> I'd actually rather push those down into zeromap and zswap as well to
> follow that pattern more closely:
>
> diff --git a/mm/page_io.c b/mm/page_io.c
> index 9b983de351f9..1fb5ce1884bd 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -538,6 +538,7 @@ static bool swap_read_folio_zeromap(struct folio *folio)
>
>         folio_zero_range(folio, 0, folio_size(folio));
>         folio_mark_uptodate(folio);

There should be another folio_unlock() above I think.

> +       folio_unlock(folio);
>         return true;
>  }
>
Nhat Pham March 1, 2025, 2:08 a.m. UTC | #12
On Thu, Feb 27, 2025 at 1:55 PM Yosry Ahmed <yosry.ahmed@linux.dev> wrote:
>
> On Thu, Feb 27, 2025 at 01:46:29PM -0800, Nhat Pham wrote:
> > On Wed, Feb 26, 2025 at 5:19 PM Yosry Ahmed <yosry.ahmed@linux.dev> wrote:
> > >
> > > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > > Currently, we crash the kernel when a decompression failure occurs in
> > > > zswap (either because of memory corruption, or a bug in the compression
> > > > algorithm). This is overkill. We should only SIGBUS the unfortunate
> > > > process asking for the zswap entry on zswap load, and skip the corrupted
> > > > entry in zswap writeback. The former is accomplished by returning true
> > > > from zswap_load(), indicating that zswap owns the swapped out content,
> > > > but without flagging the folio as up-to-date. The process trying to swap
> > > > in the page will check for the uptodate folio flag and SIGBUS (see
> > > > do_swap_page() in mm/memory.c for more details).
> > >
> > > We should call out the extra xarray walks and their perf impact (if
> > > any).
> >
> > Lemme throw this in a quick and dirty test. I doubt there's any
> > impact, but since I'm reworking this patch for a third version anyway
> > might as well.
>
> It's likely everything is cache hot and the impact is minimal, but let's
> do the due diligence.
>

Yeah, I ran some kernel build tests 5 times, and found basically
no difference:

With the new scheme:

real: mean: 125.1s, stdev: 0.12s
user: mean: 3265.23s, stdev: 9.62s
sys: mean: 2156.41s, stdev: 13.98s

The old scheme:

real: mean: 125.78s, stdev: 0.45s
user: mean: 3287.18s, stdev: 5.95s
sys: mean: 2177.08s, stdev: 26.52s

Honestly, eyeballing the results, the mean difference is probably
smaller than between-run variance. :)
Yosry Ahmed March 1, 2025, 2:20 a.m. UTC | #13
On Fri, Feb 28, 2025 at 06:08:16PM -0800, Nhat Pham wrote:
> On Thu, Feb 27, 2025 at 1:55 PM Yosry Ahmed <yosry.ahmed@linux.dev> wrote:
> >
> > On Thu, Feb 27, 2025 at 01:46:29PM -0800, Nhat Pham wrote:
> > > On Wed, Feb 26, 2025 at 5:19 PM Yosry Ahmed <yosry.ahmed@linux.dev> wrote:
> > > >
> > > > On Wed, Feb 26, 2025 at 04:14:45PM -0800, Nhat Pham wrote:
> > > > > Currently, we crash the kernel when a decompression failure occurs in
> > > > > zswap (either because of memory corruption, or a bug in the compression
> > > > > algorithm). This is overkill. We should only SIGBUS the unfortunate
> > > > > process asking for the zswap entry on zswap load, and skip the corrupted
> > > > > entry in zswap writeback. The former is accomplished by returning true
> > > > > from zswap_load(), indicating that zswap owns the swapped out content,
> > > > > but without flagging the folio as up-to-date. The process trying to swap
> > > > > in the page will check for the uptodate folio flag and SIGBUS (see
> > > > > do_swap_page() in mm/memory.c for more details).
> > > >
> > > > We should call out the extra xarray walks and their perf impact (if
> > > > any).
> > >
> > > Lemme throw this in a quick and dirty test. I doubt there's any
> > > impact, but since I'm reworking this patch for a third version anyway
> > > might as well.
> >
> > It's likely everything is cache hot and the impact is minimal, but let's
> > do the due diligence.
> >
> 
> Yeah, I ran some kernel build tests 5 times, and found basically
> no difference:
> 
> With the new scheme:
> 
> real: mean: 125.1s, stdev: 0.12s
> user: mean: 3265.23s, stdev: 9.62s
> sys: mean: 2156.41s, stdev: 13.98s
> 
> The old scheme:
> 
> real: mean: 125.78s, stdev: 0.45s
> user: mean: 3287.18s, stdev: 5.95s
> sys: mean: 2177.08s, stdev: 26.52s
> 
> Honestly, eyeballing the results, the mean difference is probably
> smaller than between-run variance. :)

Thanks for checking, that's good. Let's include that in the commit
message too.

Patch

diff --git a/mm/zswap.c b/mm/zswap.c
index 6dbf31bd2218..e4a2157bbc64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -62,6 +62,8 @@  static u64 zswap_reject_reclaim_fail;
 static u64 zswap_reject_compress_fail;
 /* Compressed page was too big for the allocator to (optimally) store */
 static u64 zswap_reject_compress_poor;
+/* Load or writeback failed due to decompression failure */
+static u64 zswap_decompress_fail;
 /* Store failed because underlying allocator could not get memory */
 static u64 zswap_reject_alloc_fail;
 /* Store failed because the entry metadata could not be allocated (rare) */
@@ -996,11 +998,13 @@  static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	return comp_ret == 0 && alloc_ret == 0;
 }
 
-static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
+static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 {
 	struct zpool *zpool = entry->pool->zpool;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
+	int decomp_ret;
+	bool ret = true;
 	u8 *src;
 
 	acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
@@ -1025,12 +1029,25 @@  static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	sg_init_table(&output, 1);
 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
-	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
-	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+	decomp_ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
+	if (decomp_ret || acomp_ctx->req->dlen != PAGE_SIZE) {
+		ret = false;
+		zswap_decompress_fail++;
+		pr_alert_ratelimited(
+			"decompression failed with returned value %d on zswap entry with swap entry value %08lx, swap type %d, and swap offset %lu. compression algorithm is %s. compressed size is %u bytes, and decompressed size is %u bytes.\n",
+			decomp_ret,
+			entry->swpentry.val,
+			swp_type(entry->swpentry),
+			swp_offset(entry->swpentry),
+			entry->pool->tfm_name,
+			entry->length,
+			acomp_ctx->req->dlen);
+	}
 
 	if (src != acomp_ctx->buffer)
 		zpool_unmap_handle(zpool, entry->handle);
 	acomp_ctx_put_unlock(acomp_ctx);
+	return ret;
 }
 
 /*********************************
@@ -1060,6 +1077,7 @@  static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
+	int ret = 0;
 
 	/* try to allocate swap cache folio */
 	si = get_swap_device(swpentry);
@@ -1081,8 +1099,8 @@  static int zswap_writeback_entry(struct zswap_entry *entry,
 	 * and freed when invalidated by the concurrent shrinker anyway.
 	 */
 	if (!folio_was_allocated) {
-		folio_put(folio);
-		return -EEXIST;
+		ret = -EEXIST;
+		goto put_folio;
 	}
 
 	/*
@@ -1095,14 +1113,17 @@  static int zswap_writeback_entry(struct zswap_entry *entry,
 	 * be dereferenced.
 	 */
 	tree = swap_zswap_tree(swpentry);
-	if (entry != xa_cmpxchg(tree, offset, entry, NULL, GFP_KERNEL)) {
-		delete_from_swap_cache(folio);
-		folio_unlock(folio);
-		folio_put(folio);
-		return -ENOMEM;
+	if (entry != xa_load(tree, offset)) {
+		ret = -ENOMEM;
+		goto delete_unlock;
+	}
+
+	if (!zswap_decompress(entry, folio)) {
+		ret = -EIO;
+		goto delete_unlock;
 	}
 
-	zswap_decompress(entry, folio);
+	xa_erase(tree, offset);
 
 	count_vm_event(ZSWPWB);
 	if (entry->objcg)
@@ -1118,9 +1139,14 @@  static int zswap_writeback_entry(struct zswap_entry *entry,
 
 	/* start writeback */
 	__swap_writepage(folio, &wbc);
-	folio_put(folio);
 
-	return 0;
+put_folio:
+	folio_put(folio);
+	return ret;
+delete_unlock:
+	delete_from_swap_cache(folio);
+	folio_unlock(folio);
+	goto put_folio;
 }
 
 /*********************************
@@ -1620,6 +1646,20 @@  bool zswap_store(struct folio *folio)
 	return ret;
 }
 
+/**
+ * zswap_load() - load a page from zswap
+ * @folio: folio to load
+ *
+ * Returns: true if zswap owns the swapped out contents, false otherwise.
+ *
+ * Note that the zswap_load() return value doesn't indicate success or failure,
+ * but whether zswap owns the swapped out contents. This MUST return true if
+ * zswap does own the swapped out contents, even if it fails to write the
+ * contents to the folio. Otherwise, the caller will try to read garbage from
+ * the backend.
+ *
+ * Success is signaled by marking the folio uptodate.
+ */
 bool zswap_load(struct folio *folio)
 {
 	swp_entry_t swp = folio->swap;
@@ -1644,6 +1684,17 @@  bool zswap_load(struct folio *folio)
 	if (WARN_ON_ONCE(folio_test_large(folio)))
 		return true;
 
+	entry = xa_load(tree, offset);
+	if (!entry)
+		return false;
+
+	if (!zswap_decompress(entry, folio))
+		return true;
+
+	count_vm_event(ZSWPIN);
+	if (entry->objcg)
+		count_objcg_events(entry->objcg, ZSWPIN, 1);
+
 	/*
 	 * When reading into the swapcache, invalidate our entry. The
 	 * swapcache can be the authoritative owner of the page and
@@ -1656,21 +1707,8 @@  bool zswap_load(struct folio *folio)
 	 * files, which reads into a private page and may free it if
 	 * the fault fails. We remain the primary owner of the entry.)
 	 */
-	if (swapcache)
-		entry = xa_erase(tree, offset);
-	else
-		entry = xa_load(tree, offset);
-
-	if (!entry)
-		return false;
-
-	zswap_decompress(entry, folio);
-
-	count_vm_event(ZSWPIN);
-	if (entry->objcg)
-		count_objcg_events(entry->objcg, ZSWPIN, 1);
-
 	if (swapcache) {
+		xa_erase(tree, offset);
 		zswap_entry_free(entry);
 		folio_mark_dirty(folio);
 	}
@@ -1771,6 +1809,8 @@  static int zswap_debugfs_init(void)
 			   zswap_debugfs_root, &zswap_reject_compress_fail);
 	debugfs_create_u64("reject_compress_poor", 0444,
 			   zswap_debugfs_root, &zswap_reject_compress_poor);
+	debugfs_create_u64("decompress_fail", 0444,
+			   zswap_debugfs_root, &zswap_decompress_fail);
 	debugfs_create_u64("written_back_pages", 0444,
 			   zswap_debugfs_root, &zswap_written_back_pages);
 	debugfs_create_file("pool_total_size", 0444,