Message ID | 20240813123452.2824659-13-yi.zhang@huaweicloud.com (mailing list archive) |
---|---|
State | New |
Series | ext4: simplify the counting and management of delalloc reserved blocks |
On Tue 13-08-24 20:34:52, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@huawei.com>
>
> When counting reserved clusters, delayed type is always equal to delonly
> type now, hence drop all delonly descriptions in parameters and
> comments.
>
> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>

Looks good. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

> ---
>  fs/ext4/extents_status.c | 66 +++++++++++++++++++---------------------
>  1 file changed, 32 insertions(+), 34 deletions(-)
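For context on the rename, the "delayed type is always equal to delonly type now" wording in the commit message refers to the old delonly predicate. The helper below is a background sketch of how earlier kernels' fs/ext4/extents_status.h expressed it (it is not part of this patch); once earlier patches in the series keep delayed extents from also being unwritten, the two tests always agree and the delonly naming becomes redundant:

/*
 * Background sketch, not part of this patch: in pre-series kernels a
 * "delonly" extent was one that is delayed and not unwritten.  Once the
 * series guarantees that a delayed extent is never also unwritten, this
 * predicate is equivalent to ext4_es_is_delayed() and the name can go.
 */
static inline int ext4_es_is_delonly(struct extent_status *es)
{
	return (ext4_es_is_delayed(es) && !ext4_es_is_unwritten(es));
}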
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 68c47ecc01a5..c786691dabd3 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -1067,7 +1067,7 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
 }
 
 struct rsvd_count {
-	int ndelonly;
+	int ndelayed;
 	bool first_do_lblk_found;
 	ext4_lblk_t first_do_lblk;
 	ext4_lblk_t last_do_lblk;
@@ -1093,10 +1093,10 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct rb_node *node;
 
-	rc->ndelonly = 0;
+	rc->ndelayed = 0;
 
 	/*
-	 * for bigalloc, note the first delonly block in the range has not
+	 * for bigalloc, note the first delayed block in the range has not
 	 * been found, record the extent containing the block to the left of
 	 * the region to be removed, if any, and note that there's no partial
 	 * cluster to track
@@ -1116,9 +1116,8 @@ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk,
 }
 
 /*
- * count_rsvd - count the clusters containing delayed and not unwritten
- *		(delonly) blocks in a range within an extent and add to
- *		the running tally in rsvd_count
+ * count_rsvd - count the clusters containing delayed blocks in a range
+ *		within an extent and add to the running tally in rsvd_count
  *
  * @inode - file containing extent
  * @lblk - first block in range
@@ -1141,7 +1140,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
 	WARN_ON(len <= 0);
 
 	if (sbi->s_cluster_ratio == 1) {
-		rc->ndelonly += (int) len;
+		rc->ndelayed += (int) len;
 		return;
 	}
 
@@ -1151,7 +1150,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
 	end = lblk + (ext4_lblk_t) len - 1;
 	end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
 
-	/* record the first block of the first delonly extent seen */
+	/* record the first block of the first delayed extent seen */
 	if (!rc->first_do_lblk_found) {
 		rc->first_do_lblk = i;
 		rc->first_do_lblk_found = true;
@@ -1165,7 +1164,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
 	 * doesn't start with it, count it and stop tracking
 	 */
 	if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) {
-		rc->ndelonly++;
+		rc->ndelayed++;
 		rc->partial = false;
 	}
 
@@ -1175,7 +1174,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
 	 */
 	if (EXT4_LBLK_COFF(sbi, i) != 0) {
 		if (end >= EXT4_LBLK_CFILL(sbi, i)) {
-			rc->ndelonly++;
+			rc->ndelayed++;
 			rc->partial = false;
 			i = EXT4_LBLK_CFILL(sbi, i) + 1;
 		}
@@ -1183,11 +1182,11 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
 
 	/*
 	 * if the current cluster starts on a cluster boundary, count the
-	 * number of whole delonly clusters in the extent
+	 * number of whole delayed clusters in the extent
 	 */
 	if ((i + sbi->s_cluster_ratio - 1) <= end) {
 		nclu = (end - i + 1) >> sbi->s_cluster_bits;
-		rc->ndelonly += nclu;
+		rc->ndelayed += nclu;
 		i += nclu << sbi->s_cluster_bits;
 	}
 
@@ -1247,10 +1246,9 @@ static struct pending_reservation *__pr_tree_search(struct rb_root *root,
  * @rc - pointer to reserved count data
  *
  * The number of reservations to be released is equal to the number of
- * clusters containing delayed and not unwritten (delonly) blocks within
- * the range, minus the number of clusters still containing delonly blocks
- * at the ends of the range, and minus the number of pending reservations
- * within the range.
+ * clusters containing delayed blocks within the range, minus the number of
+ * clusters still containing delayed blocks at the ends of the range, and
+ * minus the number of pending reservations within the range.
  */
 static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 			     struct extent_status *right_es,
@@ -1261,33 +1259,33 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
 	struct rb_node *node;
 	ext4_lblk_t first_lclu, last_lclu;
-	bool left_delonly, right_delonly, count_pending;
+	bool left_delayed, right_delayed, count_pending;
 	struct extent_status *es;
 
 	if (sbi->s_cluster_ratio > 1) {
 		/* count any remaining partial cluster */
 		if (rc->partial)
-			rc->ndelonly++;
+			rc->ndelayed++;
 
-		if (rc->ndelonly == 0)
+		if (rc->ndelayed == 0)
 			return 0;
 
 		first_lclu = EXT4_B2C(sbi, rc->first_do_lblk);
 		last_lclu = EXT4_B2C(sbi, rc->last_do_lblk);
 
 		/*
-		 * decrease the delonly count by the number of clusters at the
-		 * ends of the range that still contain delonly blocks -
+		 * decrease the delayed count by the number of clusters at the
+		 * ends of the range that still contain delayed blocks -
 		 * these clusters still need to be reserved
 		 */
-		left_delonly = right_delonly = false;
+		left_delayed = right_delayed = false;
 
 		es = rc->left_es;
 		while (es && ext4_es_end(es) >=
 		       EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) {
 			if (ext4_es_is_delayed(es)) {
-				rc->ndelonly--;
-				left_delonly = true;
+				rc->ndelayed--;
+				left_delayed = true;
 				break;
 			}
 			node = rb_prev(&es->rb_node);
@@ -1295,7 +1293,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 				break;
 			es = rb_entry(node, struct extent_status, rb_node);
 		}
-		if (right_es && (!left_delonly || first_lclu != last_lclu)) {
+		if (right_es && (!left_delayed || first_lclu != last_lclu)) {
 			if (end < ext4_es_end(right_es)) {
 				es = right_es;
 			} else {
@@ -1306,8 +1304,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 		while (es && es->es_lblk <=
 		       EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) {
 			if (ext4_es_is_delayed(es)) {
-				rc->ndelonly--;
-				right_delonly = true;
+				rc->ndelayed--;
+				right_delayed = true;
 				break;
 			}
 			node = rb_next(&es->rb_node);
@@ -1321,21 +1319,21 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 		/*
 		 * Determine the block range that should be searched for
 		 * pending reservations, if any. Clusters on the ends of the
-		 * original removed range containing delonly blocks are
+		 * original removed range containing delayed blocks are
 		 * excluded. They've already been accounted for and it's not
 		 * possible to determine if an associated pending reservation
 		 * should be released with the information available in the
 		 * extents status tree.
 		 */
 		if (first_lclu == last_lclu) {
-			if (left_delonly | right_delonly)
+			if (left_delayed | right_delayed)
 				count_pending = false;
 			else
 				count_pending = true;
 		} else {
-			if (left_delonly)
+			if (left_delayed)
 				first_lclu++;
-			if (right_delonly)
+			if (right_delayed)
 				last_lclu--;
 			if (first_lclu <= last_lclu)
 				count_pending = true;
@@ -1346,13 +1344,13 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 		/*
 		 * a pending reservation found between first_lclu and last_lclu
 		 * represents an allocated cluster that contained at least one
-		 * delonly block, so the delonly total must be reduced by one
+		 * delayed block, so the delayed total must be reduced by one
 		 * for each pending reservation found and released
 		 */
 		if (count_pending) {
 			pr = __pr_tree_search(&tree->root, first_lclu);
 			while (pr && pr->lclu <= last_lclu) {
-				rc->ndelonly--;
+				rc->ndelayed--;
 				node = rb_next(&pr->rb_node);
 				rb_erase(&pr->rb_node, &tree->root);
 				__free_pending(pr);
@@ -1363,7 +1361,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 			}
 		}
 	}
-	return rc->ndelonly;
+	return rc->ndelayed;
 }
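As a rough standalone illustration of the bigalloc arithmetic count_rsvd() applies when s_cluster_ratio > 1, the sketch below mirrors the kernel's EXT4_B2C()/EXT4_LBLK_COFF()/EXT4_LBLK_CFILL() helpers with simplified stand-in macros and a made-up cluster size; it demonstrates the counting scheme only and is not kernel code:

/* Stand-in macros that mirror (not reproduce) the ext4 bigalloc helpers. */
#include <stdio.h>

#define CLUSTER_BITS 4u                                  /* 16 blocks per cluster (example) */
#define B2C(blk)   ((blk) >> CLUSTER_BITS)               /* block -> cluster number */
#define COFF(blk)  ((blk) & ((1u << CLUSTER_BITS) - 1))  /* offset within its cluster */
#define CFILL(blk) ((blk) | ((1u << CLUSTER_BITS) - 1))  /* last block of its cluster */

int main(void)
{
	unsigned int i = 20, end = 75, ndelayed = 0, nclu;

	/* partial first cluster: counted once if the range reaches its last block */
	if (COFF(i) != 0 && end >= CFILL(i)) {
		ndelayed++;
		i = CFILL(i) + 1;
	}

	/* whole clusters fully covered by the remaining range */
	if (i + (1u << CLUSTER_BITS) - 1 <= end) {
		nclu = (end - i + 1) >> CLUSTER_BITS;
		ndelayed += nclu;
		i += nclu << CLUSTER_BITS;
	}

	/* a trailing partial cluster (blocks i..end here) is what the real code
	 * tracks via rc->partial and rc->lclu across calls */
	printf("delayed clusters counted: %u, next uncounted block: %u\n",
	       ndelayed, i);
	return 0;
}

With i = 20, end = 75 and 16-block clusters, this counts the partial starting cluster plus two whole clusters and stops at block 64, leaving the trailing partial cluster to the partial-cluster tracking; the patch renames the running total used for this tally from ndelonly to ndelayed without changing the arithmetic.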