diff mbox series

[08/17] btrfs: refactor how we extract extent buffer from page for alloc_extent_buffer()

Message ID 20200908075230.86856-9-wqu@suse.com (mailing list archive)
State New, archived
Headers show
Series btrfs: add read-only support for subpage sector size | expand

Commit Message

Qu Wenruo Sept. 8, 2020, 7:52 a.m. UTC
This patch extracts the code that retrieves the extent_buffer from
page::private into its own helper function, grab_extent_buffer_from_page().

Although it is currently just one line, the helper will become much
larger with the later sub-page size support.

Also add some extra comments why we need to do such page::private
dancing.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 49 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 40 insertions(+), 9 deletions(-)

Comments

Nikolay Borisov Sept. 11, 2020, 11:14 a.m. UTC | #1
On 8.09.20 г. 10:52 ч., Qu Wenruo wrote:
> This patch will extract the code to extract extent_buffer from
> page::private into its own function, grab_extent_buffer_from_page().
> 
> Although it's just one line, for later sub-page size support it will
> become way more larger.
> 
> Also add some extra comments why we need to do such page::private
> dancing.
> 
> Signed-off-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/extent_io.c | 49 ++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 40 insertions(+), 9 deletions(-)
> 
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index 6fafbc1d047b..3c8fe40f67fa 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -5214,6 +5214,44 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
>  }
>  #endif
>  
> +/*
> + * A helper to grab the exist extent buffer from a page.
> + *
> + * There is a small race window where two callers of alloc_extent_buffer():
> + * 		Thread 1		|	Thread 2
> + * -------------------------------------+---------------------------------------
> + * alloc_extent_buffer()		| alloc_extent_buffer()
> + * |- eb = __alloc_extent_buffer()	| |- eb = __alloc_extent_buffer()
> + * |- p = find_or_create_page()		| |- p = find_or_create_page()
> + *
> + * In above case, two ebs get allocated for the same bytenr, and got the same
> + * page.
> + * We have to rely on the page->mapping->private_lock to make one of them to
> + * give up and reuse the allocated eb:
> + *
> + * 					| |- grab_extent_buffer_from_page()
> + * 					| |- get nothing
> + * 					| |- attach_extent_buffer_page()
> + * 					| |  |- Now page->private is set
> + * 					| |- spin_unlock(&mapping->private_lock);
> + * |- spin_lock(private_lock);		| |- Continue to insert radix tree.
> + * |- grab_extent_buffer_from_page()	|
> + * |- got eb from thread 2		|
> + * |- spin_unlock(private_lock);	|
> + * |- goto free_eb;			|

This comment is slightly misleading because it's not
grab_extent_buffer_from_page()'s return value which decides who wins the
race, but rather the return value of PagePrivate(), which is invoked
_before_ grab_extent_buffer_from_page().

> + *
> + * The function here is to ensure we have proper locking and detect such race
> + * so we won't allocating an eb twice.
> + */
> +static struct extent_buffer *grab_extent_buffer_from_page(struct page *page)
> +{
> +	/*
> +	 * For PAGE_SIZE == sectorsize case, a btree_inode page should have its
> +	 * private pointer as extent buffer who owns this page.
> +	 */
> +	return (struct extent_buffer *)page->private;
> +}
> +
>  struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
>  					  u64 start)
>  {
> @@ -5258,15 +5296,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
>  
>  		spin_lock(&mapping->private_lock);
>  		if (PagePrivate(p)) {
> -			/*
> -			 * We could have already allocated an eb for this page
> -			 * and attached one so lets see if we can get a ref on
> -			 * the existing eb, and if we can we know it's good and
> -			 * we can just return that one, else we know we can just
> -			 * overwrite page->private.
> -			 */
> -			exists = (struct extent_buffer *)p->private;
> -			if (atomic_inc_not_zero(&exists->refs)) {
> +			exists = grab_extent_buffer_from_page(p);
> +			if (exists && atomic_inc_not_zero(&exists->refs)) {

grab_extent_buffer_from_page() is guaranteed to return a non-NULL extent
buffer due to the PagePrivate() check above, so simply calling
atomic_inc_not_zero() is fine.
>  				spin_unlock(&mapping->private_lock);
>  				unlock_page(p);
>  				put_page(p);
>
diff mbox series

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 6fafbc1d047b..3c8fe40f67fa 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5214,6 +5214,44 @@  struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 }
 #endif
 
+/*
+ * A helper to grab the exist extent buffer from a page.
+ *
+ * There is a small race window where two callers of alloc_extent_buffer():
+ * 		Thread 1		|	Thread 2
+ * -------------------------------------+---------------------------------------
+ * alloc_extent_buffer()		| alloc_extent_buffer()
+ * |- eb = __alloc_extent_buffer()	| |- eb = __alloc_extent_buffer()
+ * |- p = find_or_create_page()		| |- p = find_or_create_page()
+ *
+ * In above case, two ebs get allocated for the same bytenr, and got the same
+ * page.
+ * We have to rely on the page->mapping->private_lock to make one of them to
+ * give up and reuse the allocated eb:
+ *
+ * 					| |- grab_extent_buffer_from_page()
+ * 					| |- get nothing
+ * 					| |- attach_extent_buffer_page()
+ * 					| |  |- Now page->private is set
+ * 					| |- spin_unlock(&mapping->private_lock);
+ * |- spin_lock(private_lock);		| |- Continue to insert radix tree.
+ * |- grab_extent_buffer_from_page()	|
+ * |- got eb from thread 2		|
+ * |- spin_unlock(private_lock);	|
+ * |- goto free_eb;			|
+ *
+ * The function here is to ensure we have proper locking and detect such race
+ * so we won't allocating an eb twice.
+ */
+static struct extent_buffer *grab_extent_buffer_from_page(struct page *page)
+{
+	/*
+	 * For PAGE_SIZE == sectorsize case, a btree_inode page should have its
+	 * private pointer as extent buffer who owns this page.
+	 */
+	return (struct extent_buffer *)page->private;
+}
+
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start)
 {
@@ -5258,15 +5296,8 @@  struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 
 		spin_lock(&mapping->private_lock);
 		if (PagePrivate(p)) {
-			/*
-			 * We could have already allocated an eb for this page
-			 * and attached one so lets see if we can get a ref on
-			 * the existing eb, and if we can we know it's good and
-			 * we can just return that one, else we know we can just
-			 * overwrite page->private.
-			 */
-			exists = (struct extent_buffer *)p->private;
-			if (atomic_inc_not_zero(&exists->refs)) {
+			exists = grab_extent_buffer_from_page(p);
+			if (exists && atomic_inc_not_zero(&exists->refs)) {
 				spin_unlock(&mapping->private_lock);
 				unlock_page(p);
 				put_page(p);