@@ -199,10 +199,9 @@ static struct btrfs_block_group *find_next_block_group(
static struct btrfs_block_group *peek_discard_list(
struct btrfs_discard_ctl *discard_ctl,
enum btrfs_discard_state *discard_state,
- int *discard_index)
+ int *discard_index, u64 now)
{
struct btrfs_block_group *block_group;
- const u64 now = ktime_get_ns();
spin_lock(&discard_ctl->lock);
again:
@@ -224,10 +223,7 @@ static struct btrfs_block_group *peek_discard_list(
discard_ctl->block_group = block_group;
*discard_state = block_group->discard_state;
*discard_index = block_group->discard_index;
- } else {
- block_group = NULL;
}
-
spin_unlock(&discard_ctl->lock);
return block_group;
@@ -438,13 +434,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
int discard_index = 0;
u64 trimmed = 0;
u64 minlen = 0;
+ u64 now = ktime_get_ns();
discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
block_group = peek_discard_list(discard_ctl, &discard_state,
- &discard_index);
+ &discard_index, now);
if (!block_group || !btrfs_run_discard_work(discard_ctl))
return;
+ if (now < block_group->discard_eligible_time) {
+ btrfs_discard_schedule_work(discard_ctl, false);
+ return;
+ }
/* Perform discarding */
minlen = discard_minlen[discard_index];
It might happen that bg->discard_eligible_time was changed without rescheduling, so btrfs_discard_workfn() would be awakened before that eligible time, peek_discard_list() returns null, and all discarding goes to sleep until something else calls for rescheduling again. Fix the stall by keeping btrfs_discard_workfn() going by always calling btrfs_discard_schedule_work() when there is any block group in the discard lists. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> --- fs/btrfs/discard.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-)