diff mbox series

[RFC,2/3] btrfs: zoned: reserve relocation zone on mount

Message ID 20240328-hans-v1-2-4cd558959407@kernel.org (mailing list archive)
State New, archived
Headers show
Series btrfs: zoned: reclaim block-groups more aggressively | expand

Commit Message

Johannes Thumshirn March 28, 2024, 1:56 p.m. UTC
From: Johannes Thumshirn <johannes.thumshirn@wdc.com>

Reserve one zone as a data relocation target on each mount. If we already
find one empty block group, there's no need to force a chunk allocation,
but we can use this empty data block group as our relocation target.

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
 fs/btrfs/disk-io.c |  2 ++
 fs/btrfs/zoned.c   | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/zoned.h   |  3 +++
 3 files changed, 51 insertions(+)

Comments

Damien Le Moal March 28, 2024, 11:05 p.m. UTC | #1
On 3/28/24 22:56, Johannes Thumshirn wrote:
> From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> 
> Reserve one zone as a data relocation target on each mount. If we already
> find one empty block group, there's no need to force a chunk allocation,
> but we can use this empty data block group as our relocation target.
> 
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> ---
>  fs/btrfs/disk-io.c |  2 ++
>  fs/btrfs/zoned.c   | 46 ++++++++++++++++++++++++++++++++++++++++++++++
>  fs/btrfs/zoned.h   |  3 +++
>  3 files changed, 51 insertions(+)
> 
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index 5a35c2c0bbc9..83b56f109d29 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -3550,6 +3550,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
>  	}
>  	btrfs_discard_resume(fs_info);
>  
> +	btrfs_reserve_relocation_zone(fs_info);
> +
>  	if (fs_info->uuid_root &&
>  	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
>  	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
> diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
> index d51faf7f4162..fb8707f4cab5 100644
> --- a/fs/btrfs/zoned.c
> +++ b/fs/btrfs/zoned.c
> @@ -17,6 +17,7 @@
>  #include "fs.h"
>  #include "accessors.h"
>  #include "bio.h"
> +#include "transaction.h"
>  
>  /* Maximum number of zones to report per blkdev_report_zones() call */
>  #define BTRFS_REPORT_NR_ZONES   4096
> @@ -2634,3 +2635,48 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
>  	}
>  	spin_unlock(&fs_info->zone_active_bgs_lock);
>  }
> +
> +static u64 find_empty_block_group(struct btrfs_space_info *sinfo)
> +{
> +	struct btrfs_block_group *bg;
> +
> +	for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
> +		list_for_each_entry(bg, &sinfo->block_groups[i], list) {
> +			if (bg->used == 0)
> +				return bg->start;
> +		}
> +	}
> +
> +	return 0;

The first block group does not start at offset 0 ? If it does, then this is not
correct. Maybe turn this function into returning a bool and add a pointer
argument to return the bg start value ?

> +}
> +
> +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info)
> +{
> +	struct btrfs_root *tree_root = fs_info->tree_root;
> +	struct btrfs_space_info *sinfo = fs_info->data_sinfo;
> +	struct btrfs_trans_handle *trans;
> +	u64 flags = btrfs_get_alloc_profile(fs_info, sinfo->flags);
> +	u64 bytenr = 0;
> +
> +	if (!btrfs_is_zoned(fs_info))
> +		return;
> +
> +	bytenr = find_empty_block_group(sinfo);
> +	if (!bytenr) {
> +		int ret;
> +
> +		trans = btrfs_join_transaction(tree_root);
> +		if (IS_ERR(trans))
> +			return;
> +
> +		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
> +		btrfs_end_transaction(trans);
> +
> +		if (!ret)
> +			bytenr = find_empty_block_group(sinfo);

What if this fails again?

> +	}
> +
> +	spin_lock(&fs_info->relocation_bg_lock);
> +	fs_info->data_reloc_bg = bytenr;
> +	spin_unlock(&fs_info->relocation_bg_lock);
> +}
> diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
> index 77c4321e331f..048ffada4549 100644
> --- a/fs/btrfs/zoned.h
> +++ b/fs/btrfs/zoned.h
> @@ -97,6 +97,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
>  int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
>  				struct btrfs_space_info *space_info, bool do_finish);
>  void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
> +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info);
>  #else /* CONFIG_BLK_DEV_ZONED */
>  static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
>  				     struct blk_zone *zone)
> @@ -271,6 +272,8 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
>  
>  static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
>  
> +static inline void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info) { }
> +
>  #endif
>  
>  static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
>
Naohiro Aota April 2, 2024, 6:03 a.m. UTC | #2
On Fri, Mar 29, 2024 at 08:05:34AM +0900, Damien Le Moal wrote:
> On 3/28/24 22:56, Johannes Thumshirn wrote:
> > From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> > 
> > Reserve one zone as a data relocation target on each mount. If we already
> > find one empty block group, there's no need to force a chunk allocation,
> > but we can use this empty data block group as our relocation target.
> > 
> > Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> > ---
> >  fs/btrfs/disk-io.c |  2 ++
> >  fs/btrfs/zoned.c   | 46 ++++++++++++++++++++++++++++++++++++++++++++++
> >  fs/btrfs/zoned.h   |  3 +++
> >  3 files changed, 51 insertions(+)
> > 
> > diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> > index 5a35c2c0bbc9..83b56f109d29 100644
> > --- a/fs/btrfs/disk-io.c
> > +++ b/fs/btrfs/disk-io.c
> > @@ -3550,6 +3550,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
> >  	}
> >  	btrfs_discard_resume(fs_info);
> >  
> > +	btrfs_reserve_relocation_zone(fs_info);
> > +
> >  	if (fs_info->uuid_root &&
> >  	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
> >  	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
> > diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
> > index d51faf7f4162..fb8707f4cab5 100644
> > --- a/fs/btrfs/zoned.c
> > +++ b/fs/btrfs/zoned.c
> > @@ -17,6 +17,7 @@
> >  #include "fs.h"
> >  #include "accessors.h"
> >  #include "bio.h"
> > +#include "transaction.h"
> >  
> >  /* Maximum number of zones to report per blkdev_report_zones() call */
> >  #define BTRFS_REPORT_NR_ZONES   4096
> > @@ -2634,3 +2635,48 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
> >  	}
> >  	spin_unlock(&fs_info->zone_active_bgs_lock);
> >  }
> > +
> > +static u64 find_empty_block_group(struct btrfs_space_info *sinfo)
> > +{
> > +	struct btrfs_block_group *bg;
> > +
> > +	for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
> > +		list_for_each_entry(bg, &sinfo->block_groups[i], list) {
> > +			if (bg->used == 0)
> > +				return bg->start;
> > +		}
> > +	}
> > +
> > +	return 0;
> 
> The first block group does not start at offset 0 ? If it does, then this is not
> correct. Maybe turn this function into returning a bool and add a pointer
> argument to return the bg start value ?

No, it does not. The bg->start (logical address) increases monotonically as
a new block group is created. And, the first block group created by
mkfs.btrfs has a non-zero bg->start address.

> > +}
> > +
> > +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info)
> > +{
> > +	struct btrfs_root *tree_root = fs_info->tree_root;
> > +	struct btrfs_space_info *sinfo = fs_info->data_sinfo;
> > +	struct btrfs_trans_handle *trans;
> > +	u64 flags = btrfs_get_alloc_profile(fs_info, sinfo->flags);
> > +	u64 bytenr = 0;
> > +
> > +	if (!btrfs_is_zoned(fs_info))
> > +		return;
> > +
> > +	bytenr = find_empty_block_group(sinfo);
> > +	if (!bytenr) {
> > +		int ret;
> > +
> > +		trans = btrfs_join_transaction(tree_root);
> > +		if (IS_ERR(trans))
> > +			return;
> > +
> > +		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
> > +		btrfs_end_transaction(trans);
> > +
> > +		if (!ret)
> > +			bytenr = find_empty_block_group(sinfo);
> 
> What if this fail again ?

That (almost) means we don't have any free space to create a new block
group. Then, we don't have a way to deal with it. Maybe, we can reclaim
directly here?

Anyway, in that case, we will set fs_info->data_reloc_bg = 0, which is the
same behavior as the current code.

> > +	}
> > +
> > +	spin_lock(&fs_info->relocation_bg_lock);
> > +	fs_info->data_reloc_bg = bytenr;
> > +	spin_unlock(&fs_info->relocation_bg_lock);
> > +}
> > diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
> > index 77c4321e331f..048ffada4549 100644
> > --- a/fs/btrfs/zoned.h
> > +++ b/fs/btrfs/zoned.h
> > @@ -97,6 +97,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
> >  int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
> >  				struct btrfs_space_info *space_info, bool do_finish);
> >  void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
> > +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info);
> >  #else /* CONFIG_BLK_DEV_ZONED */
> >  static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
> >  				     struct blk_zone *zone)
> > @@ -271,6 +272,8 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
> >  
> >  static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
> >  
> > +static inline void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info) { }
> > +
> >  #endif
> >  
> >  static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
> > 
> 
> -- 
> Damien Le Moal
> Western Digital Research
>
Boris Burkov April 2, 2024, 5:04 p.m. UTC | #3
On Tue, Apr 02, 2024 at 06:03:35AM +0000, Naohiro Aota wrote:
> On Fri, Mar 29, 2024 at 08:05:34AM +0900, Damien Le Moal wrote:
> > On 3/28/24 22:56, Johannes Thumshirn wrote:
> > > From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> > > 
> > > Reserve one zone as a data relocation target on each mount. If we already
> > > find one empty block group, there's no need to force a chunk allocation,
> > > but we can use this empty data block group as our relocation target.

I'm confused why it's sufficient to ensure the reservation only once at
mount. What ensures that the fs is in a condition to handle needed
relocations a month later after we have already made use of the one bg
we reserved at mount? Do we always reserve the "just-relocated-out-of"
fresh one for future relocations or something? I couldn't infer that
from a quick look at the use-sites of data_reloc_bg, but I could have
easily missed it.

> > > 
> > > Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> > > ---
> > >  fs/btrfs/disk-io.c |  2 ++
> > >  fs/btrfs/zoned.c   | 46 ++++++++++++++++++++++++++++++++++++++++++++++
> > >  fs/btrfs/zoned.h   |  3 +++
> > >  3 files changed, 51 insertions(+)
> > > 
> > > diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> > > index 5a35c2c0bbc9..83b56f109d29 100644
> > > --- a/fs/btrfs/disk-io.c
> > > +++ b/fs/btrfs/disk-io.c
> > > @@ -3550,6 +3550,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
> > >  	}
> > >  	btrfs_discard_resume(fs_info);
> > >  
> > > +	btrfs_reserve_relocation_zone(fs_info);
> > > +
> > >  	if (fs_info->uuid_root &&
> > >  	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
> > >  	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
> > > diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
> > > index d51faf7f4162..fb8707f4cab5 100644
> > > --- a/fs/btrfs/zoned.c
> > > +++ b/fs/btrfs/zoned.c
> > > @@ -17,6 +17,7 @@
> > >  #include "fs.h"
> > >  #include "accessors.h"
> > >  #include "bio.h"
> > > +#include "transaction.h"
> > >  
> > >  /* Maximum number of zones to report per blkdev_report_zones() call */
> > >  #define BTRFS_REPORT_NR_ZONES   4096
> > > @@ -2634,3 +2635,48 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
> > >  	}
> > >  	spin_unlock(&fs_info->zone_active_bgs_lock);
> > >  }
> > > +
> > > +static u64 find_empty_block_group(struct btrfs_space_info *sinfo)
> > > +{
> > > +	struct btrfs_block_group *bg;
> > > +
> > > +	for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
> > > +		list_for_each_entry(bg, &sinfo->block_groups[i], list) {
> > > +			if (bg->used == 0)
> > > +				return bg->start;
> > > +		}
> > > +	}
> > > +
> > > +	return 0;
> > 
> > The first block group does not start at offset 0 ? If it does, then this is not
> > correct. Maybe turn this function into returning a bool and add a pointer
> > argument to return the bg start value ?
> 
> No, it does not. The bg->start (logical address) increases monotonically as
> a new block group is created. And, the first block group created by
> mkfs.btrfs has a non-zero bg->start address.
> 
> > > +}
> > > +
> > > +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info)
> > > +{
> > > +	struct btrfs_root *tree_root = fs_info->tree_root;
> > > +	struct btrfs_space_info *sinfo = fs_info->data_sinfo;
> > > +	struct btrfs_trans_handle *trans;
> > > +	u64 flags = btrfs_get_alloc_profile(fs_info, sinfo->flags);
> > > +	u64 bytenr = 0;
> > > +
> > > +	if (!btrfs_is_zoned(fs_info))
> > > +		return;
> > > +
> > > +	bytenr = find_empty_block_group(sinfo);
> > > +	if (!bytenr) {
> > > +		int ret;
> > > +
> > > +		trans = btrfs_join_transaction(tree_root);
> > > +		if (IS_ERR(trans))
> > > +			return;
> > > +
> > > +		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
> > > +		btrfs_end_transaction(trans);
> > > +
> > > +		if (!ret)
> > > +			bytenr = find_empty_block_group(sinfo);
> > 
> > What if this fail again ?
> 
> That (almost) means we don't have any free space to create a new block
> group. Then, we don't have a way to deal with it. Maybe, we can reclaim
> directly here?

To my more general point, should we keep trying in a more sustained way
on the live fs, even if it happens to be full-full at mount?

> 
> Anyway, in that case, we will set fs_info->data_reloc_bg = 0, which is the
> same behavior as the current code.

Well right now it is only called from mount, in which case it will only
fail if we are full, since there shouldn't be concurrent allocations.

OTOH, if this does get called from some more live fs context down the
line, then this could easily race with allocations using the block
group. For that reason, I think it makes sense to either add locking,
a retry loop, or inline reclaim.

> 
> > > +	}
> > > +
> > > +	spin_lock(&fs_info->relocation_bg_lock);
> > > +	fs_info->data_reloc_bg = bytenr;
> > > +	spin_unlock(&fs_info->relocation_bg_lock);
> > > +}
> > > diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
> > > index 77c4321e331f..048ffada4549 100644
> > > --- a/fs/btrfs/zoned.h
> > > +++ b/fs/btrfs/zoned.h
> > > @@ -97,6 +97,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
> > >  int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
> > >  				struct btrfs_space_info *space_info, bool do_finish);
> > >  void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
> > > +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info);
> > >  #else /* CONFIG_BLK_DEV_ZONED */
> > >  static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
> > >  				     struct blk_zone *zone)
> > > @@ -271,6 +272,8 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
> > >  
> > >  static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
> > >  
> > > +static inline void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info) { }
> > > +
> > >  #endif
> > >  
> > >  static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
> > > 
> > 
> > -- 
> > Damien Le Moal
> > Western Digital Research
> >
Naohiro Aota April 5, 2024, 1:14 a.m. UTC | #4
On Thu, Mar 28, 2024 at 02:56:32PM +0100, Johannes Thumshirn wrote:
> From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> 
> Reserve one zone as a data relocation target on each mount. If we already
> find one empty block group, there's no need to force a chunk allocation,
> but we can use this empty data block group as our relocation target.
> 
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> ---
>  fs/btrfs/disk-io.c |  2 ++
>  fs/btrfs/zoned.c   | 46 ++++++++++++++++++++++++++++++++++++++++++++++
>  fs/btrfs/zoned.h   |  3 +++
>  3 files changed, 51 insertions(+)
> 
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index 5a35c2c0bbc9..83b56f109d29 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -3550,6 +3550,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
>  	}
>  	btrfs_discard_resume(fs_info);
>  
> +	btrfs_reserve_relocation_zone(fs_info);
> +
>  	if (fs_info->uuid_root &&
>  	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
>  	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
> diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
> index d51faf7f4162..fb8707f4cab5 100644
> --- a/fs/btrfs/zoned.c
> +++ b/fs/btrfs/zoned.c
> @@ -17,6 +17,7 @@
>  #include "fs.h"
>  #include "accessors.h"
>  #include "bio.h"
> +#include "transaction.h"
>  
>  /* Maximum number of zones to report per blkdev_report_zones() call */
>  #define BTRFS_REPORT_NR_ZONES   4096
> @@ -2634,3 +2635,48 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
>  	}
>  	spin_unlock(&fs_info->zone_active_bgs_lock);
>  }
> +
> +static u64 find_empty_block_group(struct btrfs_space_info *sinfo)
> +{
> +	struct btrfs_block_group *bg;
> +
> +	for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) {

This starting index prefers SINGLE to DUP/RAID profiles, which is bad. We
can use something like get_alloc_profile_by_root() to decide a proper
starting index.

> +		list_for_each_entry(bg, &sinfo->block_groups[i], list) {
> +			if (bg->used == 0)
> +				return bg->start;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info)
> +{
> +	struct btrfs_root *tree_root = fs_info->tree_root;
> +	struct btrfs_space_info *sinfo = fs_info->data_sinfo;
> +	struct btrfs_trans_handle *trans;
> +	u64 flags = btrfs_get_alloc_profile(fs_info, sinfo->flags);
> +	u64 bytenr = 0;
> +
> +	if (!btrfs_is_zoned(fs_info))
> +		return;
> +
> +	bytenr = find_empty_block_group(sinfo);
> +	if (!bytenr) {
> +		int ret;
> +
> +		trans = btrfs_join_transaction(tree_root);
> +		if (IS_ERR(trans))
> +			return;
> +
> +		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
> +		btrfs_end_transaction(trans);
> +
> +		if (!ret)
> +			bytenr = find_empty_block_group(sinfo);
> +	}
> +
> +	spin_lock(&fs_info->relocation_bg_lock);

Currently, this function is called in the mount process: there is no
relocation BG set. To prevent future misuse, I'd like to add an
ASSERT(fs_info->data_reloc_bg == 0).

> +	fs_info->data_reloc_bg = bytenr;

We can activate that block group as well to ensure it's ready to go.

> +	spin_unlock(&fs_info->relocation_bg_lock);
> +}
> diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
> index 77c4321e331f..048ffada4549 100644
> --- a/fs/btrfs/zoned.h
> +++ b/fs/btrfs/zoned.h
> @@ -97,6 +97,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
>  int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
>  				struct btrfs_space_info *space_info, bool do_finish);
>  void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
> +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info);
>  #else /* CONFIG_BLK_DEV_ZONED */
>  static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
>  				     struct blk_zone *zone)
> @@ -271,6 +272,8 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
>  
>  static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
>  
> +static inline void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info) { }
> +
>  #endif
>  
>  static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
> 
> -- 
> 2.35.3
>
Naohiro Aota April 5, 2024, 5:03 a.m. UTC | #5
On Tue, Apr 02, 2024 at 10:04:51AM -0700, Boris Burkov wrote:
> On Tue, Apr 02, 2024 at 06:03:35AM +0000, Naohiro Aota wrote:
> > On Fri, Mar 29, 2024 at 08:05:34AM +0900, Damien Le Moal wrote:
> > > On 3/28/24 22:56, Johannes Thumshirn wrote:
> > > > From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> > > > 
> > > > Reserve one zone as a data relocation target on each mount. If we already
> > > > find one empty block group, there's no need to force a chunk allocation,
> > > > but we can use this empty data block group as our relocation target.
> 
> I'm confused why it's sufficient to ensure the reservation only once at
> mount. What ensures that the fs is in a condition to handle needed
> relocations a month later after we have already made use of the one bg
> we reserved at mount? Do we always reserve the "just-relocated-out-of"
> fresh one for future relocations or something? I couldn't infer that
> from a quick look at the use-sites of data_reloc_bg, but I could have
> easily missed it.

In general, btrfs_alloc_data_chunk_ondemand() called from
prealloc_file_extent_cluster() should be responsible for allocating a new
block group. It allocates a new block group if the space is not
enough. When a block group is filled exactly, there is a chance we don't
have a data relocation block group. But, the next relocation cluster will
allocate a new one.

There is a problem, however, that it only checks there is enough space for
a relocation cluster in the DATA space_info. So, even if there is no space
in the data relocation block group, we still can have enough space in the
DATA space. Then, it won't ensure the relocation cluster will be written
properly.

We would like to have a dedicated function for the zoned case. It checks
the space against the data relocation BG. When the space is not enough, it
allocates/activates a new block group or takes one existing block group,
making it a new data relocation BG. This new data relocation BG "promotion"
is also done in do_allocation_zoned(), so moving the logic out to
prealloc_file_extent_cluster() would be interesting.

There is one issue when we choose an existing block group. We cannot use an
existing BG if that BG is already "used" or "reserved" to avoid mixing
ZONE_APPEND (normal data write) and WRITE (relocation data write). This
restriction makes it difficult to use an existing BG.

Here is one interesting solution. We can freely choose an existing BG, then
we can wait enough time for on-going IOs to finish since we are in the
relocation context. It is easier to implement the logic in
prealloc_file_extent_cluster() than do_allocation_zoned() because we are
free from many locks for the extent allocation and the writeback context.

> > > > 
> > > > Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> > > > ---
> > > >  fs/btrfs/disk-io.c |  2 ++
> > > >  fs/btrfs/zoned.c   | 46 ++++++++++++++++++++++++++++++++++++++++++++++
> > > >  fs/btrfs/zoned.h   |  3 +++
> > > >  3 files changed, 51 insertions(+)
> > > > 
> > > > diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> > > > index 5a35c2c0bbc9..83b56f109d29 100644
> > > > --- a/fs/btrfs/disk-io.c
> > > > +++ b/fs/btrfs/disk-io.c
> > > > @@ -3550,6 +3550,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
> > > >  	}
> > > >  	btrfs_discard_resume(fs_info);
> > > >  
> > > > +	btrfs_reserve_relocation_zone(fs_info);
> > > > +
> > > >  	if (fs_info->uuid_root &&
> > > >  	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
> > > >  	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
> > > > diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
> > > > index d51faf7f4162..fb8707f4cab5 100644
> > > > --- a/fs/btrfs/zoned.c
> > > > +++ b/fs/btrfs/zoned.c
> > > > @@ -17,6 +17,7 @@
> > > >  #include "fs.h"
> > > >  #include "accessors.h"
> > > >  #include "bio.h"
> > > > +#include "transaction.h"
> > > >  
> > > >  /* Maximum number of zones to report per blkdev_report_zones() call */
> > > >  #define BTRFS_REPORT_NR_ZONES   4096
> > > > @@ -2634,3 +2635,48 @@ void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
> > > >  	}
> > > >  	spin_unlock(&fs_info->zone_active_bgs_lock);
> > > >  }
> > > > +
> > > > +static u64 find_empty_block_group(struct btrfs_space_info *sinfo)
> > > > +{
> > > > +	struct btrfs_block_group *bg;
> > > > +
> > > > +	for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
> > > > +		list_for_each_entry(bg, &sinfo->block_groups[i], list) {
> > > > +			if (bg->used == 0)
> > > > +				return bg->start;
> > > > +		}
> > > > +	}
> > > > +
> > > > +	return 0;
> > > 
> > > The first block group does not start at offset 0 ? If it does, then this is not
> > > correct. Maybe turn this function into returning a bool and add a pointer
> > > argument to return the bg start value ?
> > 
> > No, it does not. The bg->start (logical address) increases monotonically as
> > a new block group is created. And, the first block group created by
> > mkfs.btrfs has a non-zero bg->start address.
> > 
> > > > +}
> > > > +
> > > > +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info)
> > > > +{
> > > > +	struct btrfs_root *tree_root = fs_info->tree_root;
> > > > +	struct btrfs_space_info *sinfo = fs_info->data_sinfo;
> > > > +	struct btrfs_trans_handle *trans;
> > > > +	u64 flags = btrfs_get_alloc_profile(fs_info, sinfo->flags);
> > > > +	u64 bytenr = 0;
> > > > +
> > > > +	if (!btrfs_is_zoned(fs_info))
> > > > +		return;
> > > > +
> > > > +	bytenr = find_empty_block_group(sinfo);
> > > > +	if (!bytenr) {
> > > > +		int ret;
> > > > +
> > > > +		trans = btrfs_join_transaction(tree_root);
> > > > +		if (IS_ERR(trans))
> > > > +			return;
> > > > +
> > > > +		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
> > > > +		btrfs_end_transaction(trans);
> > > > +
> > > > +		if (!ret)
> > > > +			bytenr = find_empty_block_group(sinfo);
> > > 
> > > What if this fail again ?
> > 
> > That (almost) means we don't have any free space to create a new block
> > group. Then, we don't have a way to deal with it. Maybe, we can reclaim
> > directly here?
> 
> To my more general point, should we keep trying in a more sustained way
> on the live fs, even if it happens to be full-full at mount?
> 
> > 
> > Anyway, in that case, we will set fs_info->data_reloc_bg = 0, which is the
> > same behavior as the current code.
> 
> Well right now it is only called from mount, in which case it will only
> fail if we are full, since there shouldn't be concurrent allocations.
> 
> OTOH, if this does get called from some more live fs context down the
> line, then this could easily race with allocations using the block
> group. For that reason, I think it makes sense to either add locking,
> a retry loop, or inline reclaim.

So, implementing the above logic would help for live fs context.

> > 
> > > > +	}
> > > > +
> > > > +	spin_lock(&fs_info->relocation_bg_lock);
> > > > +	fs_info->data_reloc_bg = bytenr;
> > > > +	spin_unlock(&fs_info->relocation_bg_lock);
> > > > +}
> > > > diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
> > > > index 77c4321e331f..048ffada4549 100644
> > > > --- a/fs/btrfs/zoned.h
> > > > +++ b/fs/btrfs/zoned.h
> > > > @@ -97,6 +97,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
> > > >  int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
> > > >  				struct btrfs_space_info *space_info, bool do_finish);
> > > >  void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
> > > > +void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info);
> > > >  #else /* CONFIG_BLK_DEV_ZONED */
> > > >  static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
> > > >  				     struct blk_zone *zone)
> > > > @@ -271,6 +272,8 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
> > > >  
> > > >  static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
> > > >  
> > > > +static inline void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info) { }
> > > > +
> > > >  #endif
> > > >  
> > > >  static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
> > > > 
> > > 
> > > -- 
> > > Damien Le Moal
> > > Western Digital Research
> > >
diff mbox series

Patch

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5a35c2c0bbc9..83b56f109d29 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3550,6 +3550,8 @@  int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	}
 	btrfs_discard_resume(fs_info);
 
+	btrfs_reserve_relocation_zone(fs_info);
+
 	if (fs_info->uuid_root &&
 	    (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
 	     fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) {
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index d51faf7f4162..fb8707f4cab5 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -17,6 +17,7 @@ 
 #include "fs.h"
 #include "accessors.h"
 #include "bio.h"
+#include "transaction.h"
 
 /* Maximum number of zones to report per blkdev_report_zones() call */
 #define BTRFS_REPORT_NR_ZONES   4096
@@ -2634,3 +2635,48 @@  void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
 	}
 	spin_unlock(&fs_info->zone_active_bgs_lock);
 }
+
+static u64 find_empty_block_group(struct btrfs_space_info *sinfo)
+{
+	struct btrfs_block_group *bg;
+
+	for (int i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+		list_for_each_entry(bg, &sinfo->block_groups[i], list) {
+			if (bg->used == 0)
+				return bg->start;
+		}
+	}
+
+	return 0;
+}
+
+void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *tree_root = fs_info->tree_root;
+	struct btrfs_space_info *sinfo = fs_info->data_sinfo;
+	struct btrfs_trans_handle *trans;
+	u64 flags = btrfs_get_alloc_profile(fs_info, sinfo->flags);
+	u64 bytenr = 0;
+
+	if (!btrfs_is_zoned(fs_info))
+		return;
+
+	bytenr = find_empty_block_group(sinfo);
+	if (!bytenr) {
+		int ret;
+
+		trans = btrfs_join_transaction(tree_root);
+		if (IS_ERR(trans))
+			return;
+
+		ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
+		btrfs_end_transaction(trans);
+
+		if (!ret)
+			bytenr = find_empty_block_group(sinfo);
+	}
+
+	spin_lock(&fs_info->relocation_bg_lock);
+	fs_info->data_reloc_bg = bytenr;
+	spin_unlock(&fs_info->relocation_bg_lock);
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 77c4321e331f..048ffada4549 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -97,6 +97,7 @@  int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
 int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
 				struct btrfs_space_info *space_info, bool do_finish);
 void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
+void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -271,6 +272,8 @@  static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
 
 static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
 
+static inline void btrfs_reserve_relocation_zone(struct btrfs_fs_info *fs_info) { }
+
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)