Message ID | 59f75f70e743049cbb019752baf094a7e2f044fa.1731518011.git.jth@kernel.org
---|---
State | New
Series | btrfs: fix use-after-free in btrfs_encoded_read_endio
On Wed, Nov 13, 2024 at 5:17 PM Johannes Thumshirn <jth@kernel.org> wrote:
>
> From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
>
> Simplify the I/O completion path for encoded reads by using a
> completion instead of a wait_queue.
>
> Furthermore use refcount_t instead of atomic_t for reference counting the
> private data.
>
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Looks good now, thanks.

> ---
>  fs/btrfs/inode.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
[...]
On 2024/11/14 03:46, Johannes Thumshirn wrote:
> From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
>
> Simplify the I/O completion path for encoded reads by using a
> completion instead of a wait_queue.
>
> Furthermore use refcount_t instead of atomic_t for reference counting the
> private data.
>
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>

Reviewed-by: Qu Wenruo <wqu@suse.com>

Thanks,
Qu

> ---
>  fs/btrfs/inode.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
[...]
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fdad1adee1a3..3ba78dc3abaa 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3,6 +3,7 @@
  * Copyright (C) 2007 Oracle. All rights reserved.
  */
 
+#include "linux/completion.h"
 #include <crypto/hash.h>
 #include <linux/kernel.h>
 #include <linux/bio.h>
@@ -9068,9 +9069,9 @@ static ssize_t btrfs_encoded_read_inline(
 }
 
 struct btrfs_encoded_read_private {
-	wait_queue_head_t wait;
+	struct completion done;
 	void *uring_ctx;
-	atomic_t pending;
+	refcount_t pending_refs;
 	blk_status_t status;
 };
 
@@ -9089,14 +9090,14 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
 		 */
 		WRITE_ONCE(priv->status, bbio->bio.bi_status);
 	}
-	if (atomic_dec_and_test(&priv->pending)) {
+	if (refcount_dec_and_test(&priv->pending_refs)) {
 		int err = blk_status_to_errno(READ_ONCE(priv->status));
 
 		if (priv->uring_ctx) {
 			btrfs_uring_read_extent_endio(priv->uring_ctx, err);
 			kfree(priv);
 		} else {
-			wake_up(&priv->wait);
+			complete(&priv->done);
 		}
 	}
 	bio_put(&bbio->bio);
@@ -9116,8 +9117,8 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 	if (!priv)
 		return -ENOMEM;
 
-	init_waitqueue_head(&priv->wait);
-	atomic_set(&priv->pending, 1);
+	init_completion(&priv->done);
+	refcount_set(&priv->pending_refs, 1);
 	priv->status = 0;
 	priv->uring_ctx = uring_ctx;
 
@@ -9130,7 +9131,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
 
 		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
-			atomic_inc(&priv->pending);
+			refcount_inc(&priv->pending_refs);
 			btrfs_submit_bbio(bbio, 0);
 
 			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
@@ -9145,11 +9146,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 		disk_io_size -= bytes;
 	} while (disk_io_size);
 
-	atomic_inc(&priv->pending);
+	refcount_inc(&priv->pending_refs);
 	btrfs_submit_bbio(bbio, 0);
 
 	if (uring_ctx) {
-		if (atomic_dec_return(&priv->pending) == 0) {
+		if (refcount_dec_and_test(&priv->pending_refs)) {
 			ret = blk_status_to_errno(READ_ONCE(priv->status));
 			btrfs_uring_read_extent_endio(uring_ctx, ret);
 			kfree(priv);
@@ -9158,8 +9159,8 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
 
 		return -EIOCBQUEUED;
 	} else {
-		if (atomic_dec_return(&priv->pending) != 0)
-			io_wait_event(priv->wait, !atomic_read(&priv->pending));
+		if (!refcount_dec_and_test(&priv->pending_refs))
+			wait_for_completion_io(&priv->done);
 		/* See btrfs_encoded_read_endio() for ordering. */
 		ret = blk_status_to_errno(READ_ONCE(priv->status));
 		kfree(priv);
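
The conversion follows the common kernel idiom of pairing a refcount with a completion: the submitter holds one reference for the whole submission loop so the structure cannot be completed out from under it, each submitted bio carries its own reference, and whichever path drops the count to zero signals the waiter (or, for io_uring, runs the deferred completion). Below is a minimal, self-contained sketch of that idiom; the names demo_ctx, demo_endio and demo_submit are hypothetical and are not part of the btrfs patch.

/*
 * Minimal sketch of the refcount_t + completion pattern adopted by the
 * patch. All names here are made up for illustration; this is not btrfs
 * code.
 */
#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_ctx {
	struct completion done;
	refcount_t pending;	/* 1 for the submitter + 1 per unit in flight */
};

/* Runs once per finished unit of work (an endio handler in the real code). */
static void demo_endio(struct demo_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->pending))
		complete(&ctx->done);	/* last reference: wake the waiter */
}

static int demo_submit(unsigned int nr_units)
{
	struct demo_ctx *ctx;
	unsigned int i;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	init_completion(&ctx->done);
	/* The submitter's own reference keeps the count above zero here. */
	refcount_set(&ctx->pending, 1);

	for (i = 0; i < nr_units; i++) {
		refcount_inc(&ctx->pending);
		/*
		 * Submit asynchronous work whose completion handler calls
		 * demo_endio(ctx). It is called directly here only so the
		 * sketch stays self-contained.
		 */
		demo_endio(ctx);
	}

	/* Drop the submitter's reference; wait only if work is still pending. */
	if (!refcount_dec_and_test(&ctx->pending))
		wait_for_completion_io(&ctx->done);

	kfree(ctx);
	return 0;
}

The submitter-held reference is what makes the final refcount_dec_and_test() safe: the completion can only be signalled after the submission loop has stopped touching the structure, which is the same ordering the patch relies on in btrfs_encoded_read_regular_fill_pages().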