
[10/13] btrfs: handle uncontrolled delayed ref generation

Message ID: 20200313212330.149024-11-josef@toxicpanda.com
State: New, archived
Series: Throttle delayed refs based on time

Commit Message

Josef Bacik March 13, 2020, 9:23 p.m. UTC
Some operations can generate far more delayed refs than the async flusher
can keep up with.  To deal with this, keep track of how often the trans
handles need to be throttled, and if it happens too often, increase the
number of delayed refs each handle has to wait on per iteration.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/delayed-ref.h |  3 +++
 fs/btrfs/transaction.c | 21 +++++++++++++++++++++
 2 files changed, 24 insertions(+)
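To make the throttling scheme easier to follow before reading the diff: each
time a transaction handle is forced to throttle, a shared multiplier is bumped
(at most once per second), and transaction ends that did not need to throttle
decay it back toward 1, again at most once per second.  The standalone
userspace sketch below restates that logic for illustration only; names such
as demo_delayed_refs and demo_throttle are invented for the example and are
not part of btrfs.

/*
 * Illustrative userspace sketch of the adaptive multiplier (not kernel code):
 * throttled callers ramp a shared multiplier up at most once per second,
 * and non-throttled transaction ends decay it back toward 1.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct demo_delayed_refs {
	pthread_mutex_t lock;
	atomic_int mult;	/* backoff multiplier, starts at 1 */
	time_t last_adjustment;	/* seconds; 0 until the first throttle */
};

/* Called when a trans handle has to throttle: ramp the multiplier up. */
static unsigned long demo_throttle(struct demo_delayed_refs *dr,
				   unsigned long refs)
{
	time_t now = time(NULL);

	pthread_mutex_lock(&dr->lock);
	if (now - dr->last_adjustment >= 1) {
		/* Skip the very first throttle so a single event doesn't ramp us. */
		if (dr->last_adjustment)
			atomic_fetch_add(&dr->mult, 1);
		dr->last_adjustment = now;
	}
	pthread_mutex_unlock(&dr->lock);

	/* Scale how much delayed ref work this handle must wait for. */
	return refs * atomic_load(&dr->mult);
}

/* Called on a transaction end that did not throttle: decay the multiplier. */
static void demo_end_transaction(struct demo_delayed_refs *dr)
{
	time_t now = time(NULL);

	if (atomic_load(&dr->mult) <= 1)
		return;

	pthread_mutex_lock(&dr->lock);
	if (now - dr->last_adjustment >= 1) {
		atomic_fetch_sub(&dr->mult, 1);
		dr->last_adjustment = now;
	}
	pthread_mutex_unlock(&dr->lock);
}

int main(void)
{
	struct demo_delayed_refs dr = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.mult = 1,
		.last_adjustment = 0,
	};

	printf("scaled refs: %lu\n", demo_throttle(&dr, 100)); /* 100, mult stays 1 */
	sleep(2);
	printf("scaled refs: %lu\n", demo_throttle(&dr, 100)); /* 200, mult now 2 */
	demo_end_transaction(&dr); /* no decay yet: still within the same second */
	return 0;
}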

Patch

diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 16cf0af91464..03590a13f86e 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -157,6 +157,9 @@  struct btrfs_delayed_ref_root {
 	atomic_t entries_run;
 	wait_queue_head_t wait;
 
+	atomic_t mult;
+	time64_t last_adjustment;
+
 	/* total number of head nodes in tree */
 	unsigned long num_heads;
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index ac77a2b805fa..6f74f9699560 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -308,6 +308,7 @@  static noinline int join_transaction(struct btrfs_fs_info *fs_info,
 	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 	atomic_set(&cur_trans->delayed_refs.entries_run, 0);
+	atomic_set(&cur_trans->delayed_refs.mult, 1);
 	init_waitqueue_head(&cur_trans->delayed_refs.wait);
 
 	/*
@@ -902,6 +903,17 @@  btrfs_throttle_for_delayed_refs(struct btrfs_fs_info *fs_info,
 {
 	unsigned long threshold = max(refs, 1UL) +
 		atomic_read(&delayed_refs->entries_run);
+	time64_t start = ktime_get_seconds();
+
+	spin_lock(&delayed_refs->lock);
+	if (start - delayed_refs->last_adjustment >= 1) {
+		if (delayed_refs->last_adjustment)
+			atomic_inc(&delayed_refs->mult);
+		delayed_refs->last_adjustment = start;
+	}
+	spin_unlock(&delayed_refs->lock);
+	refs *= atomic_read(&delayed_refs->mult);
+
 	wait_event_interruptible(delayed_refs->wait,
 		 (atomic_read(&delayed_refs->entries_run) >= threshold) ||
 		 !btrfs_should_throttle_delayed_refs(fs_info, delayed_refs,
@@ -973,6 +985,15 @@  static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 		err = -EIO;
 	}
 
+	if (!throttle_delayed_refs && atomic_read(&cur_trans->delayed_refs.mult) > 1) {
+		time64_t start = ktime_get_seconds();
+		spin_lock(&cur_trans->delayed_refs.lock);
+		if ((start - cur_trans->delayed_refs.last_adjustment) >= 1) {
+			atomic_dec(&cur_trans->delayed_refs.mult);
+			cur_trans->delayed_refs.last_adjustment = start;
+		}
+		spin_unlock(&cur_trans->delayed_refs.lock);
+	}
 	if (run_async && !work_busy(&info->async_delayed_ref_work))
 		queue_work(system_unbound_wq,
 			   &info->async_delayed_ref_work);