@@ -448,6 +448,7 @@ struct btrfs_discard_ctl {
struct btrfs_block_group_cache *cache;
struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
atomic_t discard_extents;
+ atomic64_t discardable_bytes;
};
/* delayed seq elem */
@@ -300,6 +300,7 @@ void btrfs_discard_init(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&discard_ctl->discard_list[i]);
atomic_set(&discard_ctl->discard_extents, 0);
+ atomic64_set(&discard_ctl->discardable_bytes, 0);
}
void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
@@ -77,6 +77,7 @@ void btrfs_discard_update_discardable(struct btrfs_block_group_cache *cache,
{
struct btrfs_discard_ctl *discard_ctl;
s32 extents_delta;
+ s64 bytes_delta;
if (!cache || !btrfs_test_opt(cache->fs_info, DISCARD_ASYNC))
return;
@@ -88,6 +89,12 @@ void btrfs_discard_update_discardable(struct btrfs_block_group_cache *cache,
atomic_add(extents_delta, &discard_ctl->discard_extents);
ctl->discard_extents[1] = ctl->discard_extents[0];
}
+
+ bytes_delta = ctl->discardable_bytes[0] - ctl->discardable_bytes[1];
+ if (bytes_delta) {
+ atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
+ ctl->discardable_bytes[1] = ctl->discardable_bytes[0];
+ }
}
#endif
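
For readers new to the two-element arrays: index 0 is the block group's current value, maintained by the free-space-cache code, and index 1 remembers what was last folded into the filesystem-wide atomic64 in btrfs_discard_ctl. btrfs_discard_update_discardable() therefore only ever pushes the delta, mirroring the discard_extents handling just above it, and sysfs can then read a single atomic64 without walking block groups. A minimal userspace model of that propagation (the struct, function, and locking below are illustrative stand-ins, not the kernel's):

#include <inttypes.h>
#include <stdio.h>

struct ctl_stats {			/* stands in for btrfs_free_space_ctl   */
	int64_t discardable_bytes[2];	/* [0] = current, [1] = last propagated */
};

static int64_t fs_wide_discardable;	/* the atomic64_t in btrfs_discard_ctl  */

static void update_discardable(struct ctl_stats *ctl)
{
	int64_t delta = ctl->discardable_bytes[0] - ctl->discardable_bytes[1];

	if (delta) {
		fs_wide_discardable += delta;	/* atomic64_add() in the kernel */
		ctl->discardable_bytes[1] = ctl->discardable_bytes[0];
	}
}

int main(void)
{
	struct ctl_stats bg = { { 0, 0 } };

	bg.discardable_bytes[0] += 8192;	/* untrimmed extent linked         */
	update_discardable(&bg);
	bg.discardable_bytes[0] -= 4096;	/* part of it allocated or trimmed */
	update_discardable(&bg);
	printf("fs-wide: %" PRId64 " bytes\n", fs_wide_discardable);	/* 4096 */
	return 0;
}
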
@@ -814,6 +814,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
goto free_cache;
e->bitmap_extents = count_bitmap_extents(ctl, e);
ctl->discard_extents[0] += e->bitmap_extents;
+ ctl->discardable_bytes[0] += e->bytes;
}
io_ctl_drop_pages(&io_ctl);
@@ -1636,8 +1637,10 @@ __unlink_free_space(struct btrfs_free_space_ctl *ctl,
rb_erase(&info->offset_index, &ctl->free_space_offset);
ctl->free_extents--;
- if (!info->bitmap && !btrfs_free_space_trimmed(info))
+ if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
ctl->discard_extents[0]--;
+ ctl->discardable_bytes[0] -= info->bytes;
+ }
}
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
@@ -1658,8 +1661,10 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
if (ret)
return ret;
- if (!info->bitmap && !btrfs_free_space_trimmed(info))
+ if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
ctl->discard_extents[0]++;
+ ctl->discardable_bytes[0] += info->bytes;
+ }
ctl->free_space += info->bytes;
ctl->free_extents++;
@@ -1738,8 +1743,10 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
extent_delta++;
info->bitmap_extents += extent_delta;
- if (!btrfs_free_space_trimmed(info))
+ if (!btrfs_free_space_trimmed(info)) {
ctl->discard_extents[0] += extent_delta;
+ ctl->discardable_bytes[0] -= bytes;
+ }
}
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1774,8 +1781,10 @@ static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
extent_delta--;
info->bitmap_extents += extent_delta;
- if (!btrfs_free_space_trimmed(info))
+ if (!btrfs_free_space_trimmed(info)) {
ctl->discard_extents[0] += extent_delta;
+ ctl->discardable_bytes[0] += bytes;
+ }
}
/*
@@ -2042,8 +2051,10 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
u64 end;
if (!(flags & BTRFS_FSC_TRIMMED)) {
- if (btrfs_free_space_trimmed(info))
+ if (btrfs_free_space_trimmed(info)) {
ctl->discard_extents[0] += info->bitmap_extents;
+ ctl->discardable_bytes[0] += info->bytes;
+ }
info->flags &= ~(BTRFS_FSC_TRIMMED | BTRFS_FSC_TRIMMING_BITMAP);
}
@@ -2657,15 +2668,19 @@ __btrfs_return_cluster_to_free_space(
bitmap = (entry->bitmap != NULL);
if (!bitmap) {
/* merging treats extents as if they were new */
- if (!btrfs_free_space_trimmed(entry))
+ if (!btrfs_free_space_trimmed(entry)) {
ctl->discard_extents[0]--;
+ ctl->discardable_bytes[0] -= entry->bytes;
+ }
try_merge_free_space(ctl, entry, false);
steal_from_bitmap(ctl, entry, false);
/* as we insert directly, update these statistics */
- if (!btrfs_free_space_trimmed(entry))
+ if (!btrfs_free_space_trimmed(entry)) {
ctl->discard_extents[0]++;
+ ctl->discardable_bytes[0] += entry->bytes;
+ }
}
tree_insert_offset(&ctl->free_space_offset,
entry->offset, &entry->offset_index, bitmap);
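
The decrement/increment pair around try_merge_free_space() and steal_from_bitmap() keeps the merge from double counting: neighbours that get absorbed are subtracted by unlink_free_space() as they are merged away, and the grown entry is then re-added in one go. A worked example with made-up sizes, assuming a 64K extent returning from the cluster (still accounted from before it was clustered, since cluster setup moves entries without going through unlink_free_space() as far as I can tell) merges with an untrimmed 32K neighbour already in the tree:

#include <stdio.h>

int main(void)
{
	/* both pieces are untrimmed and already counted */
	long long discardable_bytes = (64 << 10) + (32 << 10);

	discardable_bytes -= 64 << 10;	/* pre-merge decrement of the returned entry    */
	discardable_bytes -= 32 << 10;	/* try_merge_free_space() unlinks the neighbour */
	discardable_bytes += 96 << 10;	/* grown 96K entry re-added after the merge     */
	printf("%lld\n", discardable_bytes);	/* 98304: the 96K entry, counted once   */
	return 0;
}
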
@@ -2950,6 +2965,8 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
spin_lock(&ctl->tree_lock);
ctl->free_space -= bytes;
+ if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
+ ctl->discardable_bytes[0] -= bytes;
if (entry->bytes == 0) {
ctl->free_extents--;
if (entry->bitmap) {
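
The !entry->bitmap check is what avoids double counting here: if I am reading the bitmap allocation path right, cluster allocations out of a bitmap go through btrfs_alloc_from_bitmap(), whose __bitmap_clear_bits() call already subtracts the bytes in the hunk above, so only the plain extent path needs the explicit adjustment. A small sketch of the two paths (stand-in names, not the kernel functions):

#include <stdbool.h>
#include <stdio.h>

static long long discardable_bytes = 1 << 20;	/* 1MiB of untrimmed free space */

/* stands in for __bitmap_clear_bits(): bitmap allocations are accounted here */
static void clear_bitmap_bits(long long bytes, bool trimmed)
{
	if (!trimmed)
		discardable_bytes -= bytes;
}

/* stands in for the tail of btrfs_alloc_from_cluster() shown above */
static void alloc_from_cluster(long long bytes, bool bitmap, bool trimmed)
{
	if (bitmap)
		clear_bitmap_bits(bytes, trimmed);	/* via btrfs_alloc_from_bitmap() */
	if (!bitmap && !trimmed)
		discardable_bytes -= bytes;		/* plain extent path */
}

int main(void)
{
	alloc_from_cluster(4096, true, false);	/* 4K out of a bitmap entry      */
	alloc_from_cluster(4096, false, false);	/* 4K out of an extent entry     */
	printf("%lld\n", discardable_bytes);	/* 1040384: each 4K counted once */
	return 0;
}
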
@@ -3467,6 +3484,7 @@ static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
entry->flags |= BTRFS_FSC_TRIMMED;
entry->flags &= ~BTRFS_FSC_TRIMMING_BITMAP;
ctl->discard_extents[0] -= entry->bitmap_extents;
+ ctl->discardable_bytes[0] -= entry->bytes;
}
}
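
Together with the add_bytes_to_bitmap() hunk, this closes the bitmap's lifecycle: its bytes stop counting as discardable once trimming finishes, and start counting again the moment untrimmed space lands in it. A worked example with made-up sizes, assuming the flag check in add_bytes_to_bitmap() runs before bitmap_set_bits() bumps info->bytes (which is what the hunk ordering suggests):

#include <stdio.h>

int main(void)
{
	long long discardable_bytes = 0;	/* ctl->discardable_bytes[0] */

	discardable_bytes += 1 << 20;	/* 1MiB untrimmed bitmap loaded from the cache */
	discardable_bytes -= 1 << 20;	/* trimming completes: end_trimming_bitmap()   */
	discardable_bytes += 1 << 20;	/* 128K freed into the now-trimmed bitmap:
					 * add_bytes_to_bitmap() re-adds the existing
					 * 1MiB and clears the trimmed flag ...        */
	discardable_bytes += 128 << 10;	/* ... then bitmap_set_bits() adds the new 128K */
	printf("%lld\n", discardable_bytes);	/* 1179648 = 1MiB + 128K */
	return 0;
}
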
@@ -41,6 +41,7 @@ struct btrfs_free_space_ctl {
int unit;
u64 start;
s32 discard_extents[2];
+ s64 discardable_bytes[2];
const struct btrfs_free_space_op *op;
void *private;
struct mutex cache_writeout_mutex;
@@ -482,8 +482,20 @@ static ssize_t btrfs_discard_extents_show(struct kobject *kobj,
}
BTRFS_ATTR(discard, discard_extents, btrfs_discard_extents_show);
+static ssize_t btrfs_discardable_bytes_show(struct kobject *kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
+
+ return snprintf(buf, PAGE_SIZE, "%lld\n",
+ atomic64_read(&fs_info->discard_ctl.discardable_bytes));
+}
+BTRFS_ATTR(discard, discardable_bytes, btrfs_discardable_bytes_show);
+
static const struct attribute *discard_attrs[] = {
BTRFS_ATTR_PTR(discard, discard_extents),
+ BTRFS_ATTR_PTR(discard, discardable_bytes),
NULL,
};
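
With that, the counter is a single read away from userspace. A quick reader sketch, assuming the attribute ends up at /sys/fs/btrfs/<UUID>/discard/discardable_bytes (path inferred from to_fs_info(kobj->parent) above; I have not checked where this series registers the discard kobject):

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	long long bytes;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <fsid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path),
		 "/sys/fs/btrfs/%s/discard/discardable_bytes", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lld", &bytes) != 1)
		bytes = -1;
	fclose(f);
	printf("discardable: %lld bytes\n", bytes);
	return 0;
}
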
Keep track of this metric so that we can understand how far ahead or
behind we are in discarding rate.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
---
 fs/btrfs/ctree.h            |  1 +
 fs/btrfs/discard.c          |  1 +
 fs/btrfs/discard.h          |  7 +++++++
 fs/btrfs/free-space-cache.c | 32 +++++++++++++++++++++++++-------
 fs/btrfs/free-space-cache.h |  1 +
 fs/btrfs/sysfs.c            | 12 ++++++++++++
 6 files changed, 47 insertions(+), 7 deletions(-)