[16/19] btrfs: keep track of discard reuse stats

Message ID 60e557d71cb58574edbc2c429534fbfefd55df48.1570479299.git.dennis@kernel.org
State New
Series
  • btrfs: async discard support

Commit Message

Dennis Zhou Oct. 7, 2019, 8:17 p.m. UTC
Keep track of how much we are discarding and how often we are reusing
untrimmed space with async discard.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
---
 fs/btrfs/ctree.h            |  3 +++
 fs/btrfs/discard.c          |  5 +++++
 fs/btrfs/free-space-cache.c | 10 ++++++++++
 fs/btrfs/sysfs.c            | 36 ++++++++++++++++++++++++++++++++++++
 4 files changed, 54 insertions(+)
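
For reference, the three counters added here show up as read-only files in
the per-filesystem discard sysfs directory introduced earlier in this
series. Below is a minimal userspace sketch for sampling them; the
/sys/fs/btrfs/<UUID>/discard/ path and the sample program itself are
illustrative assumptions, not part of this patch.

	/*
	 * Minimal sketch (not part of this patch): read the discard stat
	 * counters exposed under sysfs. Pass the discard directory, e.g.
	 * /sys/fs/btrfs/<UUID>/discard (path assumed from this series).
	 */
	#include <stdio.h>

	static long long read_stat(const char *dir, const char *name)
	{
		char path[512];
		long long val = -1;
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", dir, name);
		f = fopen(path, "r");
		if (!f)
			return -1;
		if (fscanf(f, "%lld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(int argc, char **argv)
	{
		const char *dir = argc > 1 ? argv[1] : NULL;

		if (!dir) {
			fprintf(stderr, "usage: %s <sysfs discard dir>\n", argv[0]);
			return 1;
		}

		printf("discard_extent_bytes: %lld\n",
		       read_stat(dir, "discard_extent_bytes"));
		printf("discard_bitmap_bytes: %lld\n",
		       read_stat(dir, "discard_bitmap_bytes"));
		printf("discard_bytes_saved:  %lld\n",
		       read_stat(dir, "discard_bytes_saved"));
		return 0;
	}

The attribute names match those registered in discard_attrs below.
discard_bytes_saved counts allocations served from space that had not yet
been trimmed, so it approximates discard work avoided by reuse.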

Comments

Josef Bacik Oct. 10, 2019, 5:13 p.m. UTC | #1
On Mon, Oct 07, 2019 at 04:17:47PM -0400, Dennis Zhou wrote:
> Keep track of how much we are discarding and how often we are reusing
> untrimmed space with async discard.
> 
> Signed-off-by: Dennis Zhou <dennis@kernel.org>

Reviewed-by: Josef Bacik <josef@toxicpanda.com>

Thanks,

Josef

Patch

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b5608f8dc41a..2f52b29ff74c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -453,6 +453,9 @@  struct btrfs_discard_ctl {
 	atomic_t delay;
 	atomic_t iops_limit;
 	atomic64_t bps_limit;
+	atomic64_t discard_extent_bytes;
+	atomic64_t discard_bitmap_bytes;
+	atomic64_t discard_bytes_saved;
 };
 
 /* delayed seq elem */
diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
index d99ba31e6f3b..f0088ca19d28 100644
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -280,10 +280,12 @@  static void btrfs_discard_workfn(struct work_struct *work)
 					       cache->discard_cursor,
 					       btrfs_block_group_end(cache),
 					       minlen, maxlen, true);
+		atomic64_add(trimmed, &discard_ctl->discard_bitmap_bytes);
 	} else {
 		btrfs_trim_block_group(cache, &trimmed, cache->discard_cursor,
 				       btrfs_block_group_end(cache),
 				       minlen, true);
+		atomic64_add(trimmed, &discard_ctl->discard_extent_bytes);
 	}
 
 	discard_ctl->prev_discard = trimmed;
@@ -408,6 +410,9 @@  void btrfs_discard_init(struct btrfs_fs_info *fs_info)
 	atomic_set(&discard_ctl->delay, BTRFS_DISCARD_MAX_DELAY);
 	atomic_set(&discard_ctl->iops_limit, BTRFS_DISCARD_MAX_IOPS);
 	atomic64_set(&discard_ctl->bps_limit, 0);
+	atomic64_set(&discard_ctl->discard_extent_bytes, 0);
+	atomic64_set(&discard_ctl->discard_bitmap_bytes, 0);
+	atomic64_set(&discard_ctl->discard_bytes_saved, 0);
 }
 
 void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ed35dc090df6..480119016c0d 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2773,6 +2773,8 @@  u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 			       u64 *max_extent_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_discard_ctl *discard_ctl =
+					&block_group->fs_info->discard_ctl;
 	struct btrfs_free_space *entry = NULL;
 	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
@@ -2797,6 +2799,9 @@  u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 		align_gap = entry->offset;
 		align_gap_flags = entry->flags;
 
+		if (!btrfs_free_space_trimmed(entry))
+			atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
+
 		entry->offset = offset + bytes;
 		WARN_ON(entry->bytes < bytes + align_gap_len);
 
@@ -2901,6 +2906,8 @@  u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 			     u64 min_start, u64 *max_extent_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_discard_ctl *discard_ctl =
+					&block_group->fs_info->discard_ctl;
 	struct btrfs_free_space *entry = NULL;
 	struct rb_node *node;
 	u64 ret = 0;
@@ -2965,6 +2972,9 @@  u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 
 	spin_lock(&ctl->tree_lock);
 
+	if (!btrfs_free_space_trimmed(entry))
+		atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
+
 	ctl->free_space -= bytes;
 	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
 		ctl->discardable_bytes[0] -= bytes;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 6fc4d644401b..29a290d75492 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -551,11 +551,47 @@  static ssize_t btrfs_discard_bps_limit_store(struct kobject *kobj,
 BTRFS_ATTR_RW(discard, bps_limit, btrfs_discard_bps_limit_show,
 	      btrfs_discard_bps_limit_store);
 
+static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj,
+					struct kobj_attribute *a,
+					char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n",
+		atomic64_read(&fs_info->discard_ctl.discard_extent_bytes));
+}
+BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show);
+
+static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj,
+					struct kobj_attribute *a,
+					char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n",
+		atomic64_read(&fs_info->discard_ctl.discard_bitmap_bytes));
+}
+BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show);
+
+static ssize_t btrfs_discard_bytes_saved_show(struct kobject *kobj,
+					      struct kobj_attribute *a,
+					      char *buf)
+{
+	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n",
+		atomic64_read(&fs_info->discard_ctl.discard_bytes_saved));
+}
+BTRFS_ATTR(discard, discard_bytes_saved, btrfs_discard_bytes_saved_show);
+
 static const struct attribute *discard_attrs[] = {
 	BTRFS_ATTR_PTR(discard, discard_extents),
 	BTRFS_ATTR_PTR(discard, discardable_bytes),
 	BTRFS_ATTR_PTR(discard, iops_limit),
 	BTRFS_ATTR_PTR(discard, bps_limit),
+	BTRFS_ATTR_PTR(discard, discard_extent_bytes),
+	BTRFS_ATTR_PTR(discard, discard_bitmap_bytes),
+	BTRFS_ATTR_PTR(discard, discard_bytes_saved),
 	NULL,
 };