
[v2,08/12] btrfs: make lock_and_cleanup_extent_if_need nowait compatible

Message ID 20220908002616.3189675-9-shr@fb.com (mailing list archive)
State New, archived
Series io-uring/btrfs: support async buffered writes

Commit Message

Stefan Roesch Sept. 8, 2022, 12:26 a.m. UTC
Add the nowait parameter to lock_and_cleanup_extent_if_need(). If the
nowait parameter is set, we try to lock the extent in nowait mode:
instead of blocking on a contended extent lock, we release the pages
and return -EAGAIN.

Signed-off-by: Stefan Roesch <shr@fb.com>
---
 fs/btrfs/file.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
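
For reference, the locking path in lock_and_cleanup_extent_if_need() after this patch applies looks roughly like the following (a consolidated sketch of the diff below, not the complete function):

	if (start_pos < inode->vfs_inode.i_size) {
		struct btrfs_ordered_extent *ordered;

		if (nowait) {
			/* Nowait mode: bail out instead of blocking if the
			 * extent range is already locked by someone else.
			 */
			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
				/* Drop the page references taken earlier. */
				for (i = 0; i < num_pages; i++) {
					unlock_page(pages[i]);
					put_page(pages[i]);
				}

				return -EAGAIN;
			}
		} else {
			/* Blocking mode: behave exactly as before. */
			lock_extent_bits(&inode->io_tree, start_pos, last_pos,
					 cached_state);
		}
		...
	}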

Comments

Filipe Manana Sept. 8, 2022, 10:17 a.m. UTC | #1
On Thu, Sep 8, 2022 at 1:26 AM Stefan Roesch <shr@fb.com> wrote:
>
> Add the nowait parameter to lock_and_cleanup_extent_if_need(). If the
> nowait parameter is set, we try to lock the extent in nowait mode:
> instead of blocking on a contended extent lock, we release the pages
> and return -EAGAIN.
>
> Signed-off-by: Stefan Roesch <shr@fb.com>
> ---
>  fs/btrfs/file.c | 18 +++++++++++++++---
>  1 file changed, 15 insertions(+), 3 deletions(-)
>
> diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
> index a154a3cec44b..4e1745e585cb 100644
> --- a/fs/btrfs/file.c
> +++ b/fs/btrfs/file.c
> @@ -1440,7 +1440,7 @@ static noinline int
>  lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
>                                 size_t num_pages, loff_t pos,
>                                 size_t write_bytes,
> -                               u64 *lockstart, u64 *lockend,
> +                               u64 *lockstart, u64 *lockend, bool nowait,
>                                 struct extent_state **cached_state)
>  {
>         struct btrfs_fs_info *fs_info = inode->root->fs_info;
> @@ -1455,8 +1455,20 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
>         if (start_pos < inode->vfs_inode.i_size) {
>                 struct btrfs_ordered_extent *ordered;
>
> -               lock_extent_bits(&inode->io_tree, start_pos, last_pos,
> +               if (nowait) {
> +                       if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
> +                               for (i = 0; i < num_pages; i++) {
> +                                       unlock_page(pages[i]);
> +                                       put_page(pages[i]);

Since this is a non-local array, I'd prefer if we also set pages[i] to NULL.
That may help prevent hard-to-debug bugs in the future.

Thanks.


> +                               }
> +
> +                               return -EAGAIN;
> +                       }
> +               } else {
> +                       lock_extent_bits(&inode->io_tree, start_pos, last_pos,
>                                 cached_state);
> +               }
> +
>                 ordered = btrfs_lookup_ordered_range(inode, start_pos,
>                                                      last_pos - start_pos + 1);
>                 if (ordered &&
> @@ -1755,7 +1767,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
>                 extents_locked = lock_and_cleanup_extent_if_need(
>                                 BTRFS_I(inode), pages,
>                                 num_pages, pos, write_bytes, &lockstart,
> -                               &lockend, &cached_state);
> +                               &lockend, false, &cached_state);
>                 if (extents_locked < 0) {
>                         if (extents_locked == -EAGAIN)
>                                 goto again;
> --
> 2.30.2
>
Stefan Roesch Sept. 8, 2022, 6:38 p.m. UTC | #2
On 9/8/22 3:17 AM, Filipe Manana wrote:
> On Thu, Sep 8, 2022 at 1:26 AM Stefan Roesch <shr@fb.com> wrote:
>> [...]
>>         if (start_pos < inode->vfs_inode.i_size) {
>>                 struct btrfs_ordered_extent *ordered;
>>
>> -               lock_extent_bits(&inode->io_tree, start_pos, last_pos,
>> +               if (nowait) {
>> +                       if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
>> +                               for (i = 0; i < num_pages; i++) {
>> +                                       unlock_page(pages[i]);
>> +                                       put_page(pages[i]);
> 
> Since this is a non-local array, I'd prefer if we also set pages[i] to NULL.
> That may help prevent hard-to-debug bugs in the future.
> 
> Thanks.
> 
> 

I'll set pages[i] to NULL in the next version of the patch series.
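
For what it's worth, with that change folded in, the nowait bail-out path would presumably look like this in v3 (a sketch based on Filipe's suggestion, not the posted v3 code):

	if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
		for (i = 0; i < num_pages; i++) {
			unlock_page(pages[i]);
			put_page(pages[i]);
			/* Clear the caller-visible slot so a stale page
			 * pointer cannot be used by mistake later on.
			 */
			pages[i] = NULL;
		}

		return -EAGAIN;
	}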


Patch

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a154a3cec44b..4e1745e585cb 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1440,7 +1440,7 @@ static noinline int
 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
 				size_t write_bytes,
-				u64 *lockstart, u64 *lockend,
+				u64 *lockstart, u64 *lockend, bool nowait,
 				struct extent_state **cached_state)
 {
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -1455,8 +1455,20 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 	if (start_pos < inode->vfs_inode.i_size) {
 		struct btrfs_ordered_extent *ordered;
 
-		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
+		if (nowait) {
+			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
+				for (i = 0; i < num_pages; i++) {
+					unlock_page(pages[i]);
+					put_page(pages[i]);
+				}
+
+				return -EAGAIN;
+			}
+		} else {
+			lock_extent_bits(&inode->io_tree, start_pos, last_pos,
 				cached_state);
+		}
+
 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
 						     last_pos - start_pos + 1);
 		if (ordered &&
@@ -1755,7 +1767,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 		extents_locked = lock_and_cleanup_extent_if_need(
 				BTRFS_I(inode), pages,
 				num_pages, pos, write_bytes, &lockstart,
-				&lockend, &cached_state);
+				&lockend, false, &cached_state);
 		if (extents_locked < 0) {
 			if (extents_locked == -EAGAIN)
 				goto again;
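
Note that within this patch the only caller, btrfs_buffered_write(), still passes false for the new parameter, so behaviour is unchanged. Presumably a later patch in the series wires nowait up to the iocb flags, along these lines (a sketch of the expected wiring, not code from the series):

	static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
						     struct iov_iter *i)
	{
		/* Hypothetical wiring: derive nowait from IOCB_NOWAIT. */
		bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
		...
		extents_locked = lock_and_cleanup_extent_if_need(
				BTRFS_I(inode), pages,
				num_pages, pos, write_bytes, &lockstart,
				&lockend, nowait, &cached_state);
		...
	}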