From patchwork Thu Jul 11 14:57:46 2019
X-Patchwork-Submitter: Gao Xiang
X-Patchwork-Id: 11040229
From: Gao Xiang
To: Alexander Viro, Greg Kroah-Hartman, Andrew Morton, Theodore Ts'o, Linus Torvalds
Cc: LKML, Chao Yu, Miao Xie, Li Guifu, Fang Wei, Gao Xiang
Subject: [PATCH v2 15/24] erofs: introduce erofs shrinker
Date: Thu, 11 Jul 2019 22:57:46 +0800
Message-ID: <20190711145755.33908-16-gaoxiang25@huawei.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190711145755.33908-1-gaoxiang25@huawei.com>
References: <20190711145755.33908-1-gaoxiang25@huawei.com>
X-Mailing-List: linux-fsdevel@vger.kernel.org

This patch adds a dedicated shrinker that frees unneeded memory consumed
by a number of erofs in-memory data structures.

Like F2FS and UBIFS, it also adds:
 - sbi->umount_mutex to avoid races between the shrinker and put_super;
 - sbi->shrinker_run_no so that recently scanned objects are not revisited.

(A minimal stand-alone sketch of the count/scan shrinker callback pattern
is appended after the diff for illustration.)
Signed-off-by: Gao Xiang
---
 fs/erofs/internal.h |  7 ++++
 fs/erofs/super.c    |  6 +++
 fs/erofs/utils.c    | 94 ++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 106 insertions(+), 1 deletion(-)

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 87287cd9c146..75e82fcdee08 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -63,6 +63,9 @@ struct erofs_sb_info {
 #ifdef CONFIG_EROFS_FS_ZIP
 	/* list for all registered superblocks, mainly for shrinker */
 	struct list_head list;
+	struct mutex umount_mutex;
+
+	unsigned int shrinker_run_no;
 #endif
 	u32 blocks;
 	u32 meta_blkaddr;
@@ -412,9 +415,13 @@ extern const struct file_operations erofs_dir_fops;
 #ifdef CONFIG_EROFS_FS_ZIP
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
+int __init erofs_init_shrinker(void);
+void erofs_exit_shrinker(void);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
+static inline int erofs_init_shrinker(void) { return 0; }
+static inline void erofs_exit_shrinker(void) {}
 #endif
 
 #endif
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 60058d3c50d4..ab9d0ad94afb 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -474,6 +474,9 @@ static int __init erofs_module_init(void)
 	if (err)
 		goto icache_err;
 
+	err = erofs_init_shrinker();
+	if (err)
+		goto shrinker_err;
 	err = register_filesystem(&erofs_fs_type);
 	if (err)
 		goto fs_err;
@@ -482,6 +485,8 @@ static int __init erofs_module_init(void)
 	return 0;
 
 fs_err:
+	erofs_exit_shrinker();
+shrinker_err:
 	erofs_exit_inode_cache();
 icache_err:
 	return err;
@@ -490,6 +495,7 @@ static int __init erofs_module_init(void)
 static void __exit erofs_module_exit(void)
 {
 	unregister_filesystem(&erofs_fs_type);
+	erofs_exit_shrinker();
 	erofs_exit_inode_cache();
 	infoln("successfully finalize erofs");
 }
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 08c8752895bc..727d3831b5c9 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -9,6 +9,12 @@
 #include "internal.h"
 
 #ifdef CONFIG_EROFS_FS_ZIP
+/* global shrink count (for all mounted EROFS instances) */
+static atomic_long_t erofs_global_shrink_cnt;
+
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
 /* protects the mounted 'erofs_sb_list' */
 static DEFINE_SPINLOCK(erofs_sb_list_lock);
 static LIST_HEAD(erofs_sb_list);
@@ -17,6 +23,8 @@ void erofs_shrinker_register(struct super_block *sb)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 
+	mutex_init(&sbi->umount_mutex);
+
 	spin_lock(&erofs_sb_list_lock);
 	list_add(&sbi->list, &erofs_sb_list);
 	spin_unlock(&erofs_sb_list_lock);
@@ -24,9 +32,93 @@ void erofs_shrinker_register(struct super_block *sb)
 
 void erofs_shrinker_unregister(struct super_block *sb)
 {
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+	mutex_lock(&sbi->umount_mutex);
+	/* will add shrink final handler here */
+
 	spin_lock(&erofs_sb_list_lock);
-	list_del(&EROFS_SB(sb)->list);
+	list_del(&sbi->list);
 	spin_unlock(&erofs_sb_list_lock);
+	mutex_unlock(&sbi->umount_mutex);
+}
+
+static unsigned long erofs_shrink_count(struct shrinker *shrink,
+					struct shrink_control *sc)
+{
+	return atomic_long_read(&erofs_global_shrink_cnt);
 }
+
+static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+				       struct shrink_control *sc)
+{
+	struct erofs_sb_info *sbi;
+	struct list_head *p;
+
+	unsigned long nr = sc->nr_to_scan;
+	unsigned int run_no;
+	unsigned long freed = 0;
+
+	spin_lock(&erofs_sb_list_lock);
+	do {
+		run_no = ++shrinker_run_no;
+	} while (run_no == 0);
+
+	/* Iterate over all mounted superblocks and try to shrink them */
+	p = erofs_sb_list.next;
+	while (p != &erofs_sb_list) {
+		sbi = list_entry(p, struct erofs_sb_info, list);
+
+		/*
+		 * We move the ones we do to the end of the list, so we stop
+		 * when we see one we have already done.
+		 */
+		if (sbi->shrinker_run_no == run_no)
+			break;
+
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+
+		spin_unlock(&erofs_sb_list_lock);
+		sbi->shrinker_run_no = run_no;
+
+		/* will add shrink handler here */
+
+		spin_lock(&erofs_sb_list_lock);
+		/* Get the next list element before we move this one */
+		p = p->next;
+
+		/*
+		 * Move this one to the end of the list to provide some
+		 * fairness.
+		 */
+		list_move_tail(&sbi->list, &erofs_sb_list);
+		mutex_unlock(&sbi->umount_mutex);
+
+		if (freed >= nr)
+			break;
+	}
+	spin_unlock(&erofs_sb_list_lock);
+	return freed;
+}
+
+static struct shrinker erofs_shrinker_info = {
+	.scan_objects = erofs_shrink_scan,
+	.count_objects = erofs_shrink_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
+int __init erofs_init_shrinker(void)
+{
+	return register_shrinker(&erofs_shrinker_info);
+}
+
+void erofs_exit_shrinker(void)
+{
+	unregister_shrinker(&erofs_shrinker_info);
+}
+
 #endif
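
For reference, here is a minimal stand-alone sketch of the same count/scan
shrinker callback pattern this patch wires up for erofs. It is illustrative
only and not part of the patch: the demo_* names are hypothetical, the
atomic counter is a stand-in for erofs_global_shrink_cnt, and it assumes
the register_shrinker()/unregister_shrinker() API taking a single
struct shrinker argument, as used by the kernel this series targets.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

/* pretend pool of reclaimable objects, analogous to erofs_global_shrink_cnt */
static atomic_long_t demo_reclaimable = ATOMIC_LONG_INIT(0);

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	/* report how many objects could be freed; 0 means there is nothing to scan */
	return atomic_long_read(&demo_reclaimable);
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long nr = min_t(unsigned long, sc->nr_to_scan,
				 atomic_long_read(&demo_reclaimable));

	/* "free" nr objects and report how many were actually reclaimed */
	atomic_long_sub(nr, &demo_reclaimable);
	return nr;
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects  = demo_scan,
	.seeks         = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	/* same registration step as erofs_init_shrinker() above */
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The real erofs scan callback additionally walks erofs_sb_list under
erofs_sb_list_lock, takes each sbi->umount_mutex with mutex_trylock() so a
concurrent unmount simply skips that superblock, and uses the per-superblock
shrinker_run_no to stop once it reaches an entry already visited in the
current pass.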