@@ -7892,6 +7892,8 @@ static int record_unaligned_extent_rec(struct extent_record *rec)
rbtree_postorder_for_each_entry_safe(back, tmp,
&rec->backref_tree, node) {
+ bool skip = false;
+
if (back->full_backref || !back->is_data)
continue;
@@ -7907,6 +7909,24 @@ static int record_unaligned_extent_rec(struct extent_record *rec)
if (IS_ERR_OR_NULL(dest_root))
continue;
+ /*
+ * If we repaired something and restarted we could potentially
+ * try to add this unaligned record multiple times, so check
+ * before we add a new one.
+ */
+ list_for_each_entry(urec, &dest_root->unaligned_extent_recs,
+ list) {
+ if (urec->objectid == dest_root->objectid &&
+ urec->owner == dback->owner &&
+ urec->bytenr == rec->start) {
+ skip = true;
+ break;
+ }
+ }
+
+ if (skip)
+ continue;
+
urec = malloc(sizeof(struct unaligned_extent_rec_t));
if (!urec)
return -ENOMEM;
The repair cycle in the main check will drop all of our cache and loop
through again to make sure everything is still good to go. Unfortunately we
record our unaligned extent records on a per-root list so they can be
retrieved when we're checking the fs roots. This isn't straightforward to
clean up, so instead simply check the current list of unaligned extent
records when we are adding a new one to make sure we're not duplicating our
efforts. This allows us to pass 001 with my super bytes_used fix applied.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
---
 check/main.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
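
For context, here is a minimal standalone sketch of the same check-before-add
pattern the patch uses. The `unaligned_rec` struct, the plain singly-linked
list, and the helper names are simplified assumptions for illustration, not
the actual `unaligned_extent_rec_t` / list_head code in check/main.c:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-in for the per-root unaligned extent record. */
	struct unaligned_rec {
		uint64_t objectid;	/* root objectid the record belongs to */
		uint64_t owner;		/* owner (inode) of the extent */
		uint64_t bytenr;	/* start of the unaligned extent */
		struct unaligned_rec *next;
	};

	/* Return true if an identical record is already on the list. */
	static bool has_unaligned_rec(const struct unaligned_rec *head,
				      uint64_t objectid, uint64_t owner,
				      uint64_t bytenr)
	{
		for (; head; head = head->next) {
			if (head->objectid == objectid &&
			    head->owner == owner &&
			    head->bytenr == bytenr)
				return true;
		}
		return false;
	}

	/* Add a record only if it is not already present, so a repair
	 * restart that replays this path does not grow the list. */
	static int add_unaligned_rec(struct unaligned_rec **head,
				     uint64_t objectid, uint64_t owner,
				     uint64_t bytenr)
	{
		struct unaligned_rec *rec;

		if (has_unaligned_rec(*head, objectid, owner, bytenr))
			return 0;	/* duplicate, skip */

		rec = malloc(sizeof(*rec));
		if (!rec)
			return -ENOMEM;
		rec->objectid = objectid;
		rec->owner = owner;
		rec->bytenr = bytenr;
		rec->next = *head;
		*head = rec;
		return 0;
	}

	int main(void)
	{
		struct unaligned_rec *list = NULL;
		int count = 0;

		/* Adding the same record twice leaves one entry on the list. */
		add_unaligned_rec(&list, 5, 257, 13631488);
		add_unaligned_rec(&list, 5, 257, 13631488);
		for (struct unaligned_rec *r = list; r; r = r->next)
			count++;
		printf("records on list: %d\n", count);
		return 0;
	}

The design choice is the same as in the patch: rather than trying to tear
down the per-root lists when the repair loop restarts, the insert path is
made idempotent by scanning for an existing identical record first.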