@@ -182,6 +182,13 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
 	return (block_group->start + block_group->length);
 }
 
+static inline bool btrfs_is_block_group_data_only(
+					struct btrfs_block_group *block_group)
+{
+	return ((block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
+		!(block_group->flags & BTRFS_BLOCK_GROUP_METADATA));
+}
+
 #ifdef CONFIG_BTRFS_DEBUG
 static inline int btrfs_should_fragment_free_space(
 		struct btrfs_block_group *block_group)
@@ -54,6 +54,13 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
 static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
 			        struct btrfs_block_group *block_group)
 {
+	/*
+	 * Async discard only operates on block_groups that are explicitly for
+	 * data. Mixed block_groups are not supported.
+	 */
+	if (!btrfs_is_block_group_data_only(block_group))
+		return;
+
 	spin_lock(&discard_ctl->lock);
 	__add_to_discard_list(discard_ctl, block_group);
 	spin_unlock(&discard_ctl->lock);
@@ -166,7 +173,10 @@ static struct btrfs_block_group *peek_discard_list(
 	if (block_group && now > block_group->discard_eligible_time) {
 		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
 		    block_group->used != 0) {
-			__add_to_discard_list(discard_ctl, block_group);
+			if (btrfs_is_block_group_data_only(block_group))
+				__add_to_discard_list(discard_ctl, block_group);
+			else
+				list_del_init(&block_group->discard_list);
 			goto again;
 		}
 		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
@@ -504,7 +514,9 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
 	s32 extents_delta;
 	s64 bytes_delta;
 
-	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
+	if (!block_group ||
+	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
+	    !btrfs_is_block_group_data_only(block_group))
 		return;
 
 	discard_ctl = &block_group->fs_info->discard_ctl;