[15/23] btrfs: use ticketing for data space reservations
diff mbox series

Message ID 20200721142234.2680-16-josef@toxicpanda.com
State New
Headers show
Series
  • Change data reservations to use the ticketing infra
Related show

Commit Message

Josef Bacik July 21, 2020, 2:22 p.m. UTC
Now that we have all the infrastructure in place, use the ticketing
infrastructure to make data allocations.  This still maintains the exact
same flushing behavior, but now we're using tickets to get our
reservations satisfied.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 fs/btrfs/space-info.c | 126 +++++++++++++++++++++++-------------------
 1 file changed, 68 insertions(+), 58 deletions(-)

Comments

David Sterba Aug. 13, 2020, 4:50 p.m. UTC | #1
On Tue, Jul 21, 2020 at 10:22:26AM -0400, Josef Bacik wrote:
> +static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
> +					struct btrfs_space_info *space_info,
> +					struct reserve_ticket *ticket,
> +					const enum btrfs_flush_state *states,
> +					int states_nr)
> +{
> +	int flush_state = 0;
> +	int commit_cycles = 2;
> +
> +	while (!space_info->full) {
> +		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
> +		spin_lock(&space_info->lock);
> +		if (ticket->bytes == 0) {
> +			spin_unlock(&space_info->lock);
> +			return;
> +		}
> +		spin_unlock(&space_info->lock);
> +	}

->full is checked unlocked; IIRC this is not a problem as it's sticky:
once it's 1 it won't change until the flushing is done and
everything else locks the space infos. So a comment would be good here.
David Sterba Aug. 13, 2020, 4:51 p.m. UTC | #2
On Tue, Jul 21, 2020 at 10:22:26AM -0400, Josef Bacik wrote:
> +static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
> +					struct btrfs_space_info *space_info,
> +					struct reserve_ticket *ticket,
> +					const enum btrfs_flush_state *states,
> +					int states_nr)
> +{
> +	int flush_state = 0;
> +	int commit_cycles = 2;
> +
> +	while (!space_info->full) {
> +		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
> +		spin_lock(&space_info->lock);
> +		if (ticket->bytes == 0) {
> +			spin_unlock(&space_info->lock);
> +			return;
> +		}
> +		spin_unlock(&space_info->lock);
> +	}
> +again:
> +	while (flush_state < states_nr) {
> +		u64 flush_bytes = U64_MAX;
> +
> +		if (!commit_cycles) {
> +			if (states[flush_state] == FLUSH_DELALLOC_WAIT) {
> +				flush_state++;
> +				continue;
> +			}
> +			if (states[flush_state] == COMMIT_TRANS)
> +				flush_bytes = ticket->bytes;
> +		}
> +
> +		flush_space(fs_info, space_info, flush_bytes,
> +			    states[flush_state]);
> +		spin_lock(&space_info->lock);
> +		if (ticket->bytes == 0) {
> +			spin_unlock(&space_info->lock);
> +			return;
> +		}
> +		spin_unlock(&space_info->lock);
> +		flush_state++;
> +	}

Actually, the whole logic in this loop could use some human-readable
description. There are 2 overlapping loops, one is over the states and
the other is over the commit cycles, with some state-conditional changes.

> +	if (commit_cycles) {
> +		commit_cycles--;
> +		flush_state = 0;
> +		goto again;
> +	}
> +}
David Sterba Aug. 13, 2020, 5:12 p.m. UTC | #3
On Thu, Aug 13, 2020 at 06:51:44PM +0200, David Sterba wrote:
> On Tue, Jul 21, 2020 at 10:22:26AM -0400, Josef Bacik wrote:
> > +static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
> > +					struct btrfs_space_info *space_info,
> > +					struct reserve_ticket *ticket,
> > +					const enum btrfs_flush_state *states,
> > +					int states_nr)
> > +{
> > +	int flush_state = 0;
> > +	int commit_cycles = 2;
> > +
> > +	while (!space_info->full) {
> > +		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
> > +		spin_lock(&space_info->lock);
> > +		if (ticket->bytes == 0) {
> > +			spin_unlock(&space_info->lock);
> > +			return;
> > +		}
> > +		spin_unlock(&space_info->lock);
> > +	}
> > +again:
> > +	while (flush_state < states_nr) {
> > +		u64 flush_bytes = U64_MAX;
> > +
> > +		if (!commit_cycles) {
> > +			if (states[flush_state] == FLUSH_DELALLOC_WAIT) {
> > +				flush_state++;
> > +				continue;
> > +			}
> > +			if (states[flush_state] == COMMIT_TRANS)
> > +				flush_bytes = ticket->bytes;
> > +		}
> > +
> > +		flush_space(fs_info, space_info, flush_bytes,
> > +			    states[flush_state]);
> > +		spin_lock(&space_info->lock);
> > +		if (ticket->bytes == 0) {
> > +			spin_unlock(&space_info->lock);
> > +			return;
> > +		}
> > +		spin_unlock(&space_info->lock);
> > +		flush_state++;
> > +	}
> 
> Actually, the whole logic in this loop could use some human-readable
> description. There are 2 overlapping loops, one is over the states and
> the other is over the commit cycles, with some state-conditional changes.

Never mind, the code gets removed in a later patch.

Patch
diff mbox series

diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 3a61cdfe4b21..a107370d144c 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -1070,6 +1070,54 @@  static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
 	} while (flush_state < states_nr);
 }
 
+static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
+					struct btrfs_space_info *space_info,
+					struct reserve_ticket *ticket,
+					const enum btrfs_flush_state *states,
+					int states_nr)
+{
+	int flush_state = 0;
+	int commit_cycles = 2;
+
+	while (!space_info->full) {
+		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
+		spin_lock(&space_info->lock);
+		if (ticket->bytes == 0) {
+			spin_unlock(&space_info->lock);
+			return;
+		}
+		spin_unlock(&space_info->lock);
+	}
+again:
+	while (flush_state < states_nr) {
+		u64 flush_bytes = U64_MAX;
+
+		if (!commit_cycles) {
+			if (states[flush_state] == FLUSH_DELALLOC_WAIT) {
+				flush_state++;
+				continue;
+			}
+			if (states[flush_state] == COMMIT_TRANS)
+				flush_bytes = ticket->bytes;
+		}
+
+		flush_space(fs_info, space_info, flush_bytes,
+			    states[flush_state]);
+		spin_lock(&space_info->lock);
+		if (ticket->bytes == 0) {
+			spin_unlock(&space_info->lock);
+			return;
+		}
+		spin_unlock(&space_info->lock);
+		flush_state++;
+	}
+	if (commit_cycles) {
+		commit_cycles--;
+		flush_state = 0;
+		goto again;
+	}
+}
+
 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
 				struct btrfs_space_info *space_info,
 				struct reserve_ticket *ticket)
@@ -1136,6 +1184,15 @@  static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
 						evict_flush_states,
 						ARRAY_SIZE(evict_flush_states));
 		break;
+	case BTRFS_RESERVE_FLUSH_DATA:
+		priority_reclaim_data_space(fs_info, space_info, ticket,
+					data_flush_states,
+					ARRAY_SIZE(data_flush_states));
+		break;
+	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
+		priority_reclaim_data_space(fs_info, space_info, ticket,
+					    NULL, 0);
+		break;
 	default:
 		ASSERT(0);
 		break;
@@ -1343,78 +1400,31 @@  int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
 			     enum btrfs_reserve_flush_enum flush)
 {
 	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
-	const enum btrfs_flush_state *states = NULL;
 	u64 used;
-	int states_nr = 0;
-	int commit_cycles = 2;
 	int ret = -ENOSPC;
 
 	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
 
-	if (flush == BTRFS_RESERVE_FLUSH_DATA) {
-		states = data_flush_states;
-		states_nr = ARRAY_SIZE(data_flush_states);
-	}
-
 	spin_lock(&data_sinfo->lock);
-again:
 	used = btrfs_space_info_used(data_sinfo, true);
 
 	if (used + bytes > data_sinfo->total_bytes) {
-		u64 prev_total_bytes = data_sinfo->total_bytes;
-		int flush_state = 0;
+		struct reserve_ticket ticket;
 
+		init_waitqueue_head(&ticket.wait);
+		ticket.bytes = bytes;
+		ticket.error = 0;
+		list_add_tail(&ticket.list, &data_sinfo->priority_tickets);
+		data_sinfo->reclaim_size += bytes;
 		spin_unlock(&data_sinfo->lock);
 
-		/*
-		 * Everybody can force chunk allocation, so try this first to
-		 * see if we can just bail here and make our reservation.
-		 */
-		flush_space(fs_info, data_sinfo, bytes, ALLOC_CHUNK_FORCE);
-		spin_lock(&data_sinfo->lock);
-		if (prev_total_bytes < data_sinfo->total_bytes)
-			goto again;
+		ret = handle_reserve_ticket(fs_info, data_sinfo, &ticket,
+					    flush);
+	} else {
+		btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
+		ret = 0;
 		spin_unlock(&data_sinfo->lock);
-
-		/*
-		 * Cycle through the rest of the flushing options for our flush
-		 * type, then try again.
-		 */
-		while (flush_state < states_nr) {
-			u64 flush_bytes = U64_MAX;
-
-			/*
-			 * Previously we unconditionally committed the
-			 * transaction twice before finally checking against
-			 * pinned space before committing the final time.  We
-			 * also skipped flushing delalloc the final pass
-			 * through.
-			 */
-			if (!commit_cycles) {
-				if (states[flush_state] == FLUSH_DELALLOC_WAIT) {
-					flush_state++;
-					continue;
-				}
-				if (states[flush_state] == COMMIT_TRANS)
-					flush_bytes = bytes;
-			}
-
-			flush_space(fs_info, data_sinfo, flush_bytes,
-				    states[flush_state]);
-			flush_state++;
-		}
-
-		if (!commit_cycles)
-			goto out;
-
-		commit_cycles--;
-		spin_lock(&data_sinfo->lock);
-		goto again;
 	}
-	btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
-	ret = 0;
-	spin_unlock(&data_sinfo->lock);
-out:
 	if (ret)
 		trace_btrfs_space_reservation(fs_info,
 					      "space_info:enospc",