From patchwork Mon Mar 23 10:23:50 2020
X-Patchwork-Submitter: Qu Wenruo
X-Patchwork-Id: 11452657
From: Qu Wenruo
To: linux-btrfs@vger.kernel.org
Cc: Josef Bacik
Subject: [PATCH 14/40] btrfs: relocation: Refactor the useless nodes handling into its own function
Date: Mon, 23 Mar 2020 18:23:50 +0800
Message-Id: <20200323102416.112862-15-wqu@suse.com>
In-Reply-To: <20200323102416.112862-1-wqu@suse.com>
References: <20200323102416.112862-1-wqu@suse.com>

This patch will also add some comments for the cleanup.

Signed-off-by: Qu Wenruo
Reviewed-by: Josef Bacik
---
 fs/btrfs/relocation.c | 112 ++++++++++++++++++++++++++++--------------
 1 file changed, 75 insertions(+), 37 deletions(-)

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 7d5b76d998d5..96732e859ff1 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1192,6 +1192,79 @@ static int finish_upper_links(struct backref_cache *cache,
 	return 0;
 }
 
+/*
+ * For useless nodes, do two major clean ups:
+ * - Cleanup the children edges and nodes
+ *   If child node is also orphan (no parent) during cleanup, then the
+ *   child node will also be cleaned up.
+ *
+ * - Freeing up leaves (level 0), keeps nodes detached
+ *   For nodes, the node is still cached as "detached"
+ *
+ * Return false if @node is not in the @useless_nodes list.
+ * Return true if @node is in the @useless_nodes list.
+ */
+static bool handle_useless_nodes(struct reloc_control *rc,
+				 struct backref_node *node)
+{
+	struct backref_cache *cache = &rc->backref_cache;
+	struct list_head *useless_node = &cache->useless_node;
+	bool ret = false;
+
+	while (!list_empty(useless_node)) {
+		struct backref_node *cur;
+
+		cur = list_first_entry(useless_node, struct backref_node,
+				       list);
+		list_del_init(&cur->list);
+
+		/* Only tree root nodes can be added to @useless_nodes */
+		ASSERT(list_empty(&cur->upper));
+
+		if (cur == node)
+			ret = true;
+
+		/* The node is the lowest node */
+		if (cur->lowest) {
+			list_del_init(&cur->lower);
+			cur->lowest = 0;
+		}
+
+		/* Cleanup the lower edges */
+		while (!list_empty(&cur->lower)) {
+			struct backref_edge *edge;
+			struct backref_node *lower;
+
+			edge = list_entry(cur->lower.next,
+					  struct backref_edge, list[UPPER]);
+			list_del(&edge->list[UPPER]);
+			list_del(&edge->list[LOWER]);
+			lower = edge->node[LOWER];
+			free_backref_edge(cache, edge);
+
+			/* Child node is also orphan, queue for cleanup */
+			if (list_empty(&lower->upper))
+				list_add(&lower->list, useless_node);
+		}
+		/* Mark this block processed for relocation */
+		mark_block_processed(rc, cur);
+
+		/*
+		 * Backref nodes for tree leaves are deleted from the cache.
+		 * Backref nodes for upper level tree blocks are left in the
+		 * cache to avoid unnecessary backref lookup.
+		 */
+		if (cur->level > 0) {
+			list_add(&cur->list, &cache->detached);
+			cur->detached = 1;
+		} else {
+			rb_erase(&cur->rb_node, &cache->rb_root);
+			free_backref_node(cache, cur);
+		}
+	}
+	return ret;
+}
+
 /*
  * build backref tree for a given tree block. root of the backref tree
  * corresponds the tree block, leaves of the backref tree correspond
@@ -1267,43 +1340,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 		goto out;
 	}
 
-	/*
-	 * process useless backref nodes. backref nodes for tree leaves
-	 * are deleted from the cache. backref nodes for upper level
-	 * tree blocks are left in the cache to avoid unnecessary backref
-	 * lookup.
-	 */
-	while (!list_empty(&cache->useless_node)) {
-		upper = list_first_entry(&cache->useless_node,
-					 struct backref_node, list);
-		list_del_init(&upper->list);
-		ASSERT(list_empty(&upper->upper));
-		if (upper == node)
-			node = NULL;
-		if (upper->lowest) {
-			list_del_init(&upper->lower);
-			upper->lowest = 0;
-		}
-		while (!list_empty(&upper->lower)) {
-			edge = list_first_entry(&upper->lower,
-						struct backref_edge, list[UPPER]);
-			list_del(&edge->list[UPPER]);
-			list_del(&edge->list[LOWER]);
-			lower = edge->node[LOWER];
-			free_backref_edge(cache, edge);
-
-			if (list_empty(&lower->upper))
-				list_add(&lower->list, &cache->useless_node);
-		}
-		mark_block_processed(rc, upper);
-		if (upper->level > 0) {
-			list_add(&upper->list, &cache->detached);
-			upper->detached = 1;
-		} else {
-			rb_erase(&upper->rb_node, &cache->rb_root);
-			free_backref_node(cache, upper);
-		}
-	}
+	if (handle_useless_nodes(rc, node))
+		node = NULL;
 out:
 	btrfs_backref_iter_free(iter);
 	btrfs_free_path(path);
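
As an aside for anyone studying the control flow outside the kernel tree: below is a minimal stand-alone C sketch of the drain-and-cascade pattern the new helper implements (pop a node off the useless queue, drop its child edges, requeue children that become orphans, free leaves, keep higher-level nodes around as "detached"). The toy_node/toy_edge types, the singly linked queue and toy_handle_useless() are made-up stand-ins for illustration only; they are not the btrfs backref cache API, and the rb-tree, list_head and processed-block bookkeeping are deliberately left out.

/* Illustrative stand-alone model only -- not kernel code. */
#include <stdbool.h>
#include <stdlib.h>

struct toy_node;

struct toy_edge {
	struct toy_node *lower;		/* child block this edge points to */
	struct toy_edge *next_lower;	/* next child edge of the same parent */
};

struct toy_node {
	int level;			/* 0 == tree leaf */
	struct toy_edge *lower;		/* head of the child edge list */
	int nr_upper;			/* parents still referencing this node */
	struct toy_node *next_useless;	/* singly linked "useless" queue */
	bool detached;			/* kept allocated but out of the tree */
};

/*
 * Drain the useless queue: free leaves, mark higher-level nodes detached,
 * and cascade to children that lose their last parent.  Returns true if
 * @node itself was on the queue, mirroring the return convention of
 * handle_useless_nodes() above.
 */
static bool toy_handle_useless(struct toy_node **useless, struct toy_node *node)
{
	bool ret = false;

	while (*useless) {
		struct toy_node *cur = *useless;

		*useless = cur->next_useless;
		cur->next_useless = NULL;

		if (cur == node)
			ret = true;

		/* Drop all child edges; orphaned children join the queue. */
		while (cur->lower) {
			struct toy_edge *edge = cur->lower;
			struct toy_node *lower = edge->lower;

			cur->lower = edge->next_lower;
			free(edge);

			if (--lower->nr_upper == 0) {
				lower->next_useless = *useless;
				*useless = lower;
			}
		}

		if (cur->level > 0)
			cur->detached = true;	/* stays allocated, like cached nodes */
		else
			free(cur);		/* leaves are freed outright */
	}
	return ret;
}

int main(void)
{
	/* One internal node with a single leaf child. */
	struct toy_node *root = calloc(1, sizeof(*root));
	struct toy_node *leaf = calloc(1, sizeof(*leaf));
	struct toy_edge *edge = calloc(1, sizeof(*edge));
	struct toy_node *useless = NULL;

	if (!root || !leaf || !edge)
		return 1;

	root->level = 1;
	leaf->nr_upper = 1;
	edge->lower = leaf;
	root->lower = edge;

	/* Queue the root; the leaf becomes an orphan and follows it. */
	useless = root;

	/* Returns true because the queried node (root) was on the queue. */
	return toy_handle_useless(&useless, root) ? 0 : 1;
}

The real helper additionally removes freed nodes from the cache's rb_root, marks each block as processed for relocation and moves detached nodes onto cache->detached; the toy skips that bookkeeping to keep the cascade itself visible.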