From patchwork Mon Jan 15 04:52:49 2024
From: Honggyu Kim
To: sj@kernel.org, damon@lists.linux.dev, linux-mm@kvack.org
Cc: linux-trace-kernel@vger.kernel.org, linux-kernel@vger.kernel.org, kernel_team@skhynix.com, akpm@linux-foundation.org, apopple@nvidia.com, baolin.wang@linux.alibaba.com, dave.jiang@intel.com, linmiaohe@huawei.com, lizhijian@cn.fujitsu.com, mathieu.desnoyers@efficios.com, mhiramat@kernel.org, rostedt@goodmis.org, surenb@google.com, yangx.jy@fujitsu.com, ying.huang@intel.com, ziy@nvidia.com, Honggyu Kim
Subject: [RFC PATCH 1/4] mm/vmscan: refactor reclaim_pages with reclaim_or_migrate_folios
Date: Mon, 15 Jan 2024 13:52:49 +0900
Message-ID: <20240115045253.1775-2-honggyu.kim@sk.com>
In-Reply-To: <20240115045253.1775-1-honggyu.kim@sk.com>
References: <20240115045253.1775-1-honggyu.kim@sk.com>

Since we will introduce reclaim_pages()-like functions such as
demote_pages() and promote_pages(), most of the code can be shared.
This is a preparation patch that introduces reclaim_or_migrate_folios()
to cover the common logic, taking a handler for the action-specific
work.  No functional change intended.
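To make the shape of the refactoring concrete, the userspace sketch
below shows the same pattern: one driver loop peels per-node batches
off a mixed-node list and defers the per-node work to a caller-supplied
handler, which is how reclaim_pages() and the upcoming demote_pages()/
promote_pages() share reclaim_or_migrate_folios().  All types and names
here are illustrative toys, not kernel code:

    /* Userspace toy model of reclaim_or_migrate_folios(); not kernel code. */
    #include <stdio.h>

    struct item {
            int nid;                /* the "node" the item belongs to */
            struct item *next;
    };

    /* Per-node handler, analogous to reclaim_folio_list() and friends. */
    typedef unsigned int (*node_handler_t)(struct item *batch, int nid);

    static unsigned int count_batch(struct item *batch, int nid)
    {
            unsigned int n = 0;

            for (; batch; batch = batch->next)
                    n++;
            printf("handled %u item(s) on node %d\n", n, nid);
            return n;
    }

    /*
     * Walk a mixed-node list, peel off each leading run of same-node items,
     * and hand the per-node batch to @handler.
     */
    static unsigned long for_each_node_batch(struct item *list,
                                             node_handler_t handler)
    {
            unsigned long total = 0;

            while (list) {
                    int nid = list->nid;
                    struct item *batch = NULL, **tail = &batch;

                    /* Move the leading same-node run onto a private list. */
                    while (list && list->nid == nid) {
                            *tail = list;
                            list = list->next;
                            tail = &(*tail)->next;
                            *tail = NULL;
                    }
                    total += handler(batch, nid);
            }
            return total;
    }

    int main(void)
    {
            struct item items[] = {
                    { .nid = 0 }, { .nid = 0 }, { .nid = 1 }, { .nid = 0 },
            };
            for (int i = 0; i < 3; i++)
                    items[i].next = &items[i + 1];

            printf("total: %lu\n", for_each_node_batch(items, count_batch));
            return 0;
    }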
Signed-off-by: Honggyu Kim
---
 mm/vmscan.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bba207f41b14..7ca2396ccc3b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2107,15 +2107,16 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
 	return nr_reclaimed;
 }
 
-unsigned long reclaim_pages(struct list_head *folio_list)
+static unsigned long reclaim_or_migrate_folios(struct list_head *folio_list,
+		unsigned int (*handler)(struct list_head *, struct pglist_data *))
 {
 	int nid;
-	unsigned int nr_reclaimed = 0;
+	unsigned int nr_folios = 0;
 	LIST_HEAD(node_folio_list);
 	unsigned int noreclaim_flag;
 
 	if (list_empty(folio_list))
-		return nr_reclaimed;
+		return nr_folios;
 
 	noreclaim_flag = memalloc_noreclaim_save();
 
@@ -2129,15 +2130,20 @@ unsigned long reclaim_pages(struct list_head *folio_list)
 			continue;
 		}
 
-		nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
+		nr_folios += handler(&node_folio_list, NODE_DATA(nid));
 		nid = folio_nid(lru_to_folio(folio_list));
 	} while (!list_empty(folio_list));
 
-	nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
+	nr_folios += handler(&node_folio_list, NODE_DATA(nid));
 
 	memalloc_noreclaim_restore(noreclaim_flag);
 
-	return nr_reclaimed;
+	return nr_folios;
+}
+
+unsigned long reclaim_pages(struct list_head *folio_list)
+{
+	return reclaim_or_migrate_folios(folio_list, reclaim_folio_list);
 }
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,

From patchwork Mon Jan 15 04:52:50 2024
From: Honggyu Kim
To: sj@kernel.org, damon@lists.linux.dev, linux-mm@kvack.org
Cc: linux-trace-kernel@vger.kernel.org, linux-kernel@vger.kernel.org, kernel_team@skhynix.com, akpm@linux-foundation.org, apopple@nvidia.com, baolin.wang@linux.alibaba.com, dave.jiang@intel.com, linmiaohe@huawei.com, lizhijian@cn.fujitsu.com, mathieu.desnoyers@efficios.com, mhiramat@kernel.org, rostedt@goodmis.org, surenb@google.com, yangx.jy@fujitsu.com, ying.huang@intel.com, ziy@nvidia.com, Honggyu Kim
Subject: [RFC PATCH 2/4] mm/damon: introduce DAMOS_DEMOTE action for demotion
Date: Mon, 15 Jan 2024 13:52:50 +0900
Message-ID: <20240115045253.1775-3-honggyu.kim@sk.com>
In-Reply-To: <20240115045253.1775-1-honggyu.kim@sk.com>
References: <20240115045253.1775-1-honggyu.kim@sk.com>

This patch introduces the DAMOS_DEMOTE action, which is similar to
DAMOS_PAGEOUT but demotes folios instead of swapping them out.  Since
demotion shares some routines with pageout, many functions contain
similar logic for both paths.

The execution sequences of DAMOS_PAGEOUT and DAMOS_DEMOTE look as
follows:

    DAMOS_PAGEOUT action
        damon_pa_apply_scheme
        -> damon_pa_reclaim
        -> reclaim_pages
        -> reclaim_folio_list
        -> shrink_folio_list

    DAMOS_DEMOTE action
        damon_pa_apply_scheme
        -> damon_pa_reclaim
        -> demote_pages
        -> do_demote_folio_list
        -> __demote_folio_list
        -> demote_folio_list

__demote_folio_list() is a minimized version of shrink_folio_list(),
stripped down to handle only demotion.
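For context, a DAMOS action is normally selected from userspace by
writing one of the damon_sysfs_damos_action_strs[] strings to the
scheme's "action" file.  The snippet below is a minimal userspace
sketch of doing that for the new "demote" action; it assumes the
standard DAMON sysfs layout with kdamond 0, context 0, scheme 0
already created, so the exact path depends on your setup:

    /* Minimal userspace sketch: select the "demote" DAMOS action via
     * sysfs.  The path below is an assumption about the configured
     * kdamond/context/scheme indexes; adjust to your setup. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path =
                "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/action";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* "demote" matches the string added to damon_sysfs_damos_action_strs[] */
            if (write(fd, "demote", strlen("demote")) < 0)
                    perror("write");
            close(fd);
            return 0;
    }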
Signed-off-by: Honggyu Kim
---
 include/linux/damon.h    |  2 +
 mm/damon/paddr.c         | 17 +++++---
 mm/damon/sysfs-schemes.c |  1 +
 mm/internal.h            |  1 +
 mm/vmscan.c              | 84 ++++++++++++++++++++++++++++++++++++
 5 files changed, 99 insertions(+), 6 deletions(-)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index e00ddf1ed39c..4c0a0fef09c5 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -106,6 +106,7 @@ struct damon_target {
  * @DAMOS_LRU_PRIO:	Prioritize the region on its LRU lists.
  * @DAMOS_LRU_DEPRIO:	Deprioritize the region on its LRU lists.
  * @DAMOS_STAT:		Do nothing but count the stat.
+ * @DAMOS_DEMOTE:	Do demotion for the current region.
  * @NR_DAMOS_ACTIONS:	Total number of DAMOS actions
  *
  * The support of each action is up to running &struct damon_operations.
@@ -123,6 +124,7 @@ enum damos_action {
 	DAMOS_LRU_PRIO,
 	DAMOS_LRU_DEPRIO,
 	DAMOS_STAT,		/* Do nothing but only record the stat */
+	DAMOS_DEMOTE,
 	NR_DAMOS_ACTIONS,
 };

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 081e2a325778..d3e3f077cd00 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -224,7 +224,7 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 	return false;
 }
 
-static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
+static unsigned long damon_pa_reclaim(struct damon_region *r, struct damos *s, bool is_demote)
 {
 	unsigned long addr, applied;
 	LIST_HEAD(folio_list);
@@ -242,14 +242,17 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 		folio_test_clear_young(folio);
 		if (!folio_isolate_lru(folio))
 			goto put_folio;
-		if (folio_test_unevictable(folio))
+		if (folio_test_unevictable(folio) && !is_demote)
 			folio_putback_lru(folio);
 		else
 			list_add(&folio->lru, &folio_list);
 put_folio:
 		folio_put(folio);
 	}
-	applied = reclaim_pages(&folio_list);
+	if (is_demote)
+		applied = demote_pages(&folio_list);
+	else
+		applied = reclaim_pages(&folio_list);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
@@ -297,13 +300,15 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
 {
 	switch (scheme->action) {
 	case DAMOS_PAGEOUT:
-		return damon_pa_pageout(r, scheme);
+		return damon_pa_reclaim(r, scheme, false);
 	case DAMOS_LRU_PRIO:
 		return damon_pa_mark_accessed(r, scheme);
 	case DAMOS_LRU_DEPRIO:
 		return damon_pa_deactivate_pages(r, scheme);
 	case DAMOS_STAT:
 		break;
+	case DAMOS_DEMOTE:
+		return damon_pa_reclaim(r, scheme, true);
 	default:
 		/* DAMOS actions that not yet supported by 'paddr'. */
 		break;
@@ -317,11 +322,11 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
 {
 	switch (scheme->action) {
 	case DAMOS_PAGEOUT:
+	case DAMOS_LRU_DEPRIO:
+	case DAMOS_DEMOTE:
 		return damon_cold_score(context, r, scheme);
 	case DAMOS_LRU_PRIO:
 		return damon_hot_score(context, r, scheme);
-	case DAMOS_LRU_DEPRIO:
-		return damon_cold_score(context, r, scheme);
 	default:
 		break;
 	}

diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index fe0fe2562000..ac7cd3f17b12 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -1187,6 +1187,7 @@ static const char * const damon_sysfs_damos_action_strs[] = {
 	"lru_prio",
 	"lru_deprio",
 	"stat",
+	"demote",
 };
 
 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(

diff --git a/mm/internal.h b/mm/internal.h
index b61034bd50f5..2380397ec2f3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -869,6 +869,7 @@ extern void set_pageblock_order(void);
 unsigned long reclaim_pages(struct list_head *folio_list);
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *folio_list);
+unsigned long demote_pages(struct list_head *folio_list);
 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
 #define ALLOC_WMARK_MIN	WMARK_MIN
 #define ALLOC_WMARK_LOW	WMARK_LOW

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ca2396ccc3b..eaa3dd6b7562 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -998,6 +998,66 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
 	return !data_race(folio_swap_flags(folio) & SWP_FS_OPS);
 }
 
+/*
+ * __demote_folio_list() returns the number of demoted pages
+ */
+static unsigned int __demote_folio_list(struct list_head *folio_list,
+		struct pglist_data *pgdat, struct scan_control *sc)
+{
+	LIST_HEAD(ret_folios);
+	LIST_HEAD(demote_folios);
+	unsigned int nr_demoted = 0;
+
+	if (next_demotion_node(pgdat->node_id) == NUMA_NO_NODE)
+		return 0;
+
+	cond_resched();
+
+	while (!list_empty(folio_list)) {
+		struct folio *folio;
+		enum folio_references references;
+
+		cond_resched();
+
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+
+		if (!folio_trylock(folio))
+			goto keep;
+
+		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
+
+		references = folio_check_references(folio, sc);
+		if (references == FOLIOREF_KEEP)
+			goto keep_locked;
+
+		/* Relocate its contents to another node. */
+		list_add(&folio->lru, &demote_folios);
+		folio_unlock(folio);
+		continue;
+keep_locked:
+		folio_unlock(folio);
+keep:
+		list_add(&folio->lru, &ret_folios);
+		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+	}
+	/* 'folio_list' is always empty here */
+
+	/* Migrate folios selected for demotion */
+	nr_demoted += demote_folio_list(&demote_folios, pgdat);
+	/* Folios that could not be demoted are still in @demote_folios */
+	if (!list_empty(&demote_folios)) {
+		/* Folios which weren't demoted go back on @folio_list */
+		list_splice_init(&demote_folios, folio_list);
+	}
+
+	try_to_unmap_flush();
+
+	list_splice(&ret_folios, folio_list);
+
+	return nr_demoted;
+}
+
 /*
  * shrink_folio_list() returns the number of reclaimed pages
  */
@@ -2107,6 +2167,25 @@ static unsigned int reclaim_folio_list(struct list_head *folio_list,
 	return nr_reclaimed;
 }
 
+static unsigned int do_demote_folio_list(struct list_head *folio_list,
+		struct pglist_data *pgdat)
+{
+	unsigned int nr_demoted;
+	struct folio *folio;
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	nr_demoted = __demote_folio_list(folio_list, pgdat, &sc);
+	while (!list_empty(folio_list)) {
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+		folio_putback_lru(folio);
+	}
+
+	return nr_demoted;
+}
+
 static unsigned long reclaim_or_migrate_folios(struct list_head *folio_list,
 		unsigned int (*handler)(struct list_head *, struct pglist_data *))
 {
@@ -2146,6 +2225,11 @@ unsigned long reclaim_pages(struct list_head *folio_list)
 	return reclaim_or_migrate_folios(folio_list, reclaim_folio_list);
 }
 
+unsigned long demote_pages(struct list_head *folio_list)
+{
+	return reclaim_or_migrate_folios(folio_list, do_demote_folio_list);
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct lruvec *lruvec, struct scan_control *sc)
 {

From patchwork Mon Jan 15 04:52:51 2024
From: Honggyu Kim
To: sj@kernel.org, damon@lists.linux.dev, linux-mm@kvack.org
Cc: linux-trace-kernel@vger.kernel.org, linux-kernel@vger.kernel.org, kernel_team@skhynix.com, akpm@linux-foundation.org, apopple@nvidia.com, baolin.wang@linux.alibaba.com, dave.jiang@intel.com, linmiaohe@huawei.com, lizhijian@cn.fujitsu.com, mathieu.desnoyers@efficios.com, mhiramat@kernel.org, rostedt@goodmis.org, surenb@google.com, yangx.jy@fujitsu.com, ying.huang@intel.com, ziy@nvidia.com, Hyeongtak Ji
Subject: [RFC PATCH 3/4] mm/memory-tiers: add next_promotion_node to find promotion target
Date: Mon, 15 Jan 2024 13:52:51 +0900
Message-ID: <20240115045253.1775-4-honggyu.kim@sk.com>
In-Reply-To: <20240115045253.1775-1-honggyu.kim@sk.com>
References: <20240115045253.1775-1-honggyu.kim@sk.com>

From: Hyeongtak Ji

This patch adds next_promotion_node(), which can be used to identify
the appropriate promotion target node based on memory tiers.  When
multiple promotion target nodes are available, the nearest node is
selected based on NUMA distance.
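The selection rule is a simple pairwise reduction over candidate
nodes: keep whichever candidate has the smaller node_distance() from
the starting node.  The userspace sketch below shows the same
reduction; the distance matrix and candidate mask are made-up
examples, not the kernel's node_demotion[] machinery:

    /* Toy model of next_promotion_node()'s candidate reduction: among
     * nodes that list @from as a demotion target, keep the nearest one.
     * The distance matrix is a made-up example topology. */
    #include <stdio.h>

    #define NR_NODES 4
    #define NO_NODE  (-1)

    static const int distance[NR_NODES][NR_NODES] = {
            { 10, 20, 17, 28 },
            { 20, 10, 28, 17 },
            { 17, 28, 10, 28 },
            { 28, 17, 28, 10 },
    };

    /* candidate[nid] == 1 if @from is a preferred demotion target of nid */
    static int nearest_candidate(const int *candidate, int from)
    {
            int target = NO_NODE;

            for (int nid = 0; nid < NR_NODES; nid++) {
                    if (!candidate[nid])
                            continue;
                    /* same rule as select_promotion_target() */
                    if (target == NO_NODE ||
                        distance[from][nid] < distance[from][target])
                            target = nid;
            }
            return target;
    }

    int main(void)
    {
            /* Nodes 0 and 1 both demote to node 2 in this made-up topology. */
            const int candidate[NR_NODES] = { 1, 1, 0, 0 };

            printf("promotion target for node 2: %d\n",
                   nearest_candidate(candidate, 2));
            return 0;
    }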
Signed-off-by: Hyeongtak Ji
---
 include/linux/memory-tiers.h | 11 +++++++++
 mm/memory-tiers.c            | 43 ++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)

diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 1e39d27bee41..0788e435fc50 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -50,6 +50,7 @@ int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
 int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist);
 #ifdef CONFIG_MIGRATION
 int next_demotion_node(int node);
+int next_promotion_node(int node);
 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
 bool node_is_toptier(int node);
 #else
@@ -58,6 +59,11 @@ static inline int next_demotion_node(int node)
 	return NUMA_NO_NODE;
 }
 
+static inline int next_promotion_node(int node)
+{
+	return NUMA_NO_NODE;
+}
+
 static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
 {
 	*targets = NODE_MASK_NONE;
@@ -101,6 +107,11 @@ static inline int next_demotion_node(int node)
 	return NUMA_NO_NODE;
 }
 
+static inline int next_promotion_node(int node)
+{
+	return NUMA_NO_NODE;
+}
+
 static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
 {
 	*targets = NODE_MASK_NONE;

diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 8d5291add2bc..0060ee571cf4 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -335,6 +335,49 @@ int next_demotion_node(int node)
 	return target;
 }
 
+/*
+ * Select a promotion target that is close to the from node among the given
+ * two nodes.
+ *
+ * TODO: consider other decision policy as node_distance may not be precise.
+ */
+static int select_promotion_target(int a, int b, int from)
+{
+	if (node_distance(from, a) < node_distance(from, b))
+		return a;
+	else
+		return b;
+}
+
+/**
+ * next_promotion_node() - Get the next node in the promotion path
+ * @node: The starting node to lookup the next node
+ *
+ * Return: node id for next memory node in the promotion path hierarchy
+ * from @node; NUMA_NO_NODE if @node is the toptier.
+ */
+int next_promotion_node(int node)
+{
+	int target = NUMA_NO_NODE;
+	int nid;
+
+	if (node_is_toptier(node))
+		return NUMA_NO_NODE;
+
+	rcu_read_lock();
+	for_each_node_state(nid, N_MEMORY) {
+		if (node_isset(node, node_demotion[nid].preferred)) {
+			if (target == NUMA_NO_NODE)
+				target = nid;
+			else
+				target = select_promotion_target(nid, target, node);
+		}
+	}
+	rcu_read_unlock();
+
+	return target;
+}
+
 static void disable_all_demotion_targets(void)
 {
 	struct memory_tier *memtier;

From patchwork Mon Jan 15 04:52:52 2024
From: Honggyu Kim
To: sj@kernel.org, damon@lists.linux.dev, linux-mm@kvack.org
Cc: linux-trace-kernel@vger.kernel.org, linux-kernel@vger.kernel.org, kernel_team@skhynix.com, akpm@linux-foundation.org, apopple@nvidia.com, baolin.wang@linux.alibaba.com, dave.jiang@intel.com, linmiaohe@huawei.com, lizhijian@cn.fujitsu.com, mathieu.desnoyers@efficios.com, mhiramat@kernel.org, rostedt@goodmis.org, surenb@google.com, yangx.jy@fujitsu.com, ying.huang@intel.com, ziy@nvidia.com, Hyeongtak Ji, Honggyu Kim
Subject: [RFC PATCH 4/4] mm/damon: introduce DAMOS_PROMOTE action for promotion
Date: Mon, 15 Jan 2024 13:52:52 +0900
Message-ID: <20240115045253.1775-5-honggyu.kim@sk.com>
In-Reply-To: <20240115045253.1775-1-honggyu.kim@sk.com>
References: <20240115045253.1775-1-honggyu.kim@sk.com>

From: Hyeongtak Ji

This patch introduces the DAMOS_PROMOTE action for paddr mode.  It
includes renaming alloc_demote_folio to alloc_migrate_folio so that it
can be used for promotion as well.

The execution sequences of DAMOS_DEMOTE and DAMOS_PROMOTE look as
follows for comparison:

    DAMOS_DEMOTE action
        damon_pa_apply_scheme
        -> damon_pa_reclaim
        -> demote_pages
        -> do_demote_folio_list
        -> __demote_folio_list
        -> demote_folio_list

    DAMOS_PROMOTE action
        damon_pa_apply_scheme
        -> damon_pa_promote
        -> promote_pages
        -> do_promote_folio_list
        -> __promote_folio_list
        -> promote_folio_list
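One detail worth calling out before the diff: __promote_folio_list()
is stricter than __demote_folio_list() about folio_check_references()
results.  Demotion skips only FOLIOREF_KEEP folios, while promotion
also skips FOLIOREF_RECLAIM and FOLIOREF_RECLAIM_CLEAN, so only folios
the reference check would activate (recently referenced, hot ones) get
promoted.  The sketch below restates that policy in plain C; the enum
mirrors vmscan's folio_references, but the helper names are
illustrative only:

    /* Illustrative restatement of the triage policy; not kernel code. */
    #include <stdio.h>

    enum folio_references {
            FOLIOREF_RECLAIM,
            FOLIOREF_RECLAIM_CLEAN,
            FOLIOREF_KEEP,
            FOLIOREF_ACTIVATE,
    };

    /* Demotion migrates everything except folios the check wants to keep. */
    static int should_demote(enum folio_references refs)
    {
            return refs != FOLIOREF_KEEP;
    }

    /* Promotion migrates only folios hot enough to be activated. */
    static int should_promote(enum folio_references refs)
    {
            return refs == FOLIOREF_ACTIVATE;
    }

    int main(void)
    {
            printf("KEEP:     demote=%d promote=%d\n",
                   should_demote(FOLIOREF_KEEP), should_promote(FOLIOREF_KEEP));
            printf("ACTIVATE: demote=%d promote=%d\n",
                   should_demote(FOLIOREF_ACTIVATE),
                   should_promote(FOLIOREF_ACTIVATE));
            return 0;
    }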
Signed-off-by: Hyeongtak Ji
Signed-off-by: Honggyu Kim
---
 include/linux/damon.h          |   2 +
 include/linux/migrate_mode.h   |   1 +
 include/linux/vm_event_item.h  |   1 +
 include/trace/events/migrate.h |   3 +-
 mm/damon/paddr.c               |  29 ++++
 mm/damon/sysfs-schemes.c       |   1 +
 mm/internal.h                  |   1 +
 mm/vmscan.c                    | 129 ++++++++++++++++++++++++++++++++-
 mm/vmstat.c                    |   1 +
 9 files changed, 165 insertions(+), 3 deletions(-)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index 4c0a0fef09c5..477060bb6718 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -107,6 +107,7 @@ struct damon_target {
  * @DAMOS_LRU_DEPRIO:	Deprioritize the region on its LRU lists.
  * @DAMOS_STAT:		Do nothing but count the stat.
  * @DAMOS_DEMOTE:	Do demotion for the current region.
+ * @DAMOS_PROMOTE:	Do promotion if possible, otherwise do nothing.
  * @NR_DAMOS_ACTIONS:	Total number of DAMOS actions
  *
  * The support of each action is up to running &struct damon_operations.
@@ -125,6 +126,7 @@ enum damos_action {
 	DAMOS_LRU_DEPRIO,
 	DAMOS_STAT,		/* Do nothing but only record the stat */
 	DAMOS_DEMOTE,
+	DAMOS_PROMOTE,
 	NR_DAMOS_ACTIONS,
 };

diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index f37cc03f9369..63f75eb9abf3 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -29,6 +29,7 @@ enum migrate_reason {
 	MR_CONTIG_RANGE,
 	MR_LONGTERM_PIN,
 	MR_DEMOTION,
+	MR_PROMOTION,
 	MR_TYPES
 };

diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8abfa1240040..63cf920afeaa 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -44,6 +44,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		PGDEMOTE_KSWAPD,
 		PGDEMOTE_DIRECT,
 		PGDEMOTE_KHUGEPAGED,
+		PGPROMOTE,
 		PGSCAN_KSWAPD,
 		PGSCAN_DIRECT,
 		PGSCAN_KHUGEPAGED,

diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 0190ef725b43..f0dd569c1e62 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -22,7 +22,8 @@
 	EM( MR_NUMA_MISPLACED,	"numa_misplaced")		\
 	EM( MR_CONTIG_RANGE,	"contig_range")			\
 	EM( MR_LONGTERM_PIN,	"longterm_pin")			\
-	EMe(MR_DEMOTION,	"demotion")
+	EM( MR_DEMOTION,	"demotion")			\
+	EMe(MR_PROMOTION,	"promotion")
 
 /*
  * First define the enums in the above macros to be exported to userspace

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index d3e3f077cd00..360ce69d5898 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -257,6 +257,32 @@ static unsigned long damon_pa_reclaim(struct damon_region *r, struct damos *s, bool is_demote)
 	return applied * PAGE_SIZE;
 }
 
+static unsigned long damon_pa_promote(struct damon_region *r, struct damos *s)
+{
+	unsigned long addr, applied;
+	LIST_HEAD(folio_list);
+
+	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+		if (!folio)
+			continue;
+
+		if (damos_pa_filter_out(s, folio))
+			goto put_folio;
+
+		if (!folio_isolate_lru(folio))
+			goto put_folio;
+
+		list_add(&folio->lru, &folio_list);
+put_folio:
+		folio_put(folio);
+	}
+	applied = promote_pages(&folio_list);
+	cond_resched();
+	return applied * PAGE_SIZE;
+}
+
 static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 		struct damon_region *r, struct damos *s, bool mark_accessed)
 {
@@ -309,6 +335,8 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
 		break;
 	case DAMOS_DEMOTE:
 		return damon_pa_reclaim(r, scheme, true);
+	case DAMOS_PROMOTE:
+		return damon_pa_promote(r, scheme);
 	default:
 		/* DAMOS actions that not yet supported by 'paddr'. */
 		break;
@@ -326,6 +354,7 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
 	case DAMOS_DEMOTE:
 		return damon_cold_score(context, r, scheme);
 	case DAMOS_LRU_PRIO:
+	case DAMOS_PROMOTE:
 		return damon_hot_score(context, r, scheme);
 	default:
 		break;

diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index ac7cd3f17b12..1b84d0af7e1f 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -1188,6 +1188,7 @@ static const char * const damon_sysfs_damos_action_strs[] = {
 	"lru_deprio",
 	"stat",
 	"demote",
+	"promote",
 };
 
 static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(

diff --git a/mm/internal.h b/mm/internal.h
index 2380397ec2f3..f159455e63d4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -870,6 +870,7 @@ unsigned long reclaim_pages(struct list_head *folio_list);
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *folio_list);
 unsigned long demote_pages(struct list_head *folio_list);
+unsigned long promote_pages(struct list_head *folio_list);
 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
 #define ALLOC_WMARK_MIN	WMARK_MIN
 #define ALLOC_WMARK_LOW	WMARK_LOW

diff --git a/mm/vmscan.c b/mm/vmscan.c
index eaa3dd6b7562..f03be320f9ad 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -910,7 +910,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
 		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
 }
 
-static struct folio *alloc_demote_folio(struct folio *src,
+static struct folio *alloc_migrate_folio(struct folio *src,
 		unsigned long private)
 {
 	struct folio *dst;
@@ -973,7 +973,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
 	node_get_allowed_targets(pgdat, &allowed_mask);
 
 	/* Demotion ignores all cpuset and mempolicy settings */
-	migrate_pages(demote_folios, alloc_demote_folio, NULL,
+	migrate_pages(demote_folios, alloc_migrate_folio, NULL,
 		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
 		      &nr_succeeded);
 
@@ -982,6 +982,48 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
 	return nr_succeeded;
 }
 
+/*
+ * Take folios on @promote_folios and attempt to promote them to another node.
+ * Folios which are not promoted are left on @promote_folios.
+ */
+static unsigned int promote_folio_list(struct list_head *promote_folios,
+				       struct pglist_data *pgdat)
+{
+	int target_nid = next_promotion_node(pgdat->node_id);
+	unsigned int nr_succeeded;
+	nodemask_t allowed_mask = NODE_MASK_NONE;
+
+	struct migration_target_control mtc = {
+		/*
+		 * Allocate from 'node', or fail quickly and quietly.
+		 * When this happens, 'page' will likely stay where
+		 * it is instead of being migrated.
+		 */
+		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
+			__GFP_NOMEMALLOC | GFP_NOWAIT,
+		.nid = target_nid,
+		.nmask = &allowed_mask
+	};
+
+	if (pgdat->node_id == target_nid)
+		return 0;
+
+	if (list_empty(promote_folios))
+		return 0;
+
+	if (target_nid == NUMA_NO_NODE)
+		return 0;
+
+	/* Promotion ignores all cpuset and mempolicy settings */
+	migrate_pages(promote_folios, alloc_migrate_folio, NULL,
+		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_PROMOTION,
+		      &nr_succeeded);
+
+	__count_vm_events(PGPROMOTE, nr_succeeded);
+
+	return nr_succeeded;
+}
+
 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
 {
 	if (gfp_mask & __GFP_FS)
@@ -1058,6 +1100,65 @@ static unsigned int __demote_folio_list(struct list_head *folio_list,
 	return nr_demoted;
 }
 
+/*
+ * __promote_folio_list() returns the number of promoted pages
+ */
+static unsigned int __promote_folio_list(struct list_head *folio_list,
+		struct pglist_data *pgdat, struct scan_control *sc)
+{
+	LIST_HEAD(ret_folios);
+	LIST_HEAD(promote_folios);
+	unsigned int nr_promoted = 0;
+
+	cond_resched();
+
+	while (!list_empty(folio_list)) {
+		struct folio *folio;
+		enum folio_references references;
+
+		cond_resched();
+
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+
+		if (!folio_trylock(folio))
+			goto keep;
+
+		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
+
+		references = folio_check_references(folio, sc);
+		if (references == FOLIOREF_KEEP ||
+		    references == FOLIOREF_RECLAIM ||
+		    references == FOLIOREF_RECLAIM_CLEAN)
+			goto keep_locked;
+
+		/* Relocate its contents to another node. */
+		list_add(&folio->lru, &promote_folios);
+		folio_unlock(folio);
+		continue;
+keep_locked:
+		folio_unlock(folio);
+keep:
+		list_add(&folio->lru, &ret_folios);
+		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+	}
+	/* 'folio_list' is always empty here */
+
+	/* Migrate folios selected for promotion */
+	nr_promoted += promote_folio_list(&promote_folios, pgdat);
+	/* Folios that could not be promoted are still in @promote_folios */
+	if (!list_empty(&promote_folios)) {
+		/* Folios which weren't promoted go back on @folio_list */
+		list_splice_init(&promote_folios, folio_list);
+	}
+
+	try_to_unmap_flush();
+
+	list_splice(&ret_folios, folio_list);
+
+	return nr_promoted;
+}
+
 /*
  * shrink_folio_list() returns the number of reclaimed pages
  */
@@ -2186,6 +2287,25 @@ static unsigned int do_demote_folio_list(struct list_head *folio_list,
 	return nr_demoted;
 }
 
+static unsigned int do_promote_folio_list(struct list_head *folio_list,
+		struct pglist_data *pgdat)
+{
+	unsigned int nr_promoted;
+	struct folio *folio;
+	struct scan_control sc = {
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	nr_promoted = __promote_folio_list(folio_list, pgdat, &sc);
+	while (!list_empty(folio_list)) {
+		folio = lru_to_folio(folio_list);
+		list_del(&folio->lru);
+		folio_putback_lru(folio);
+	}
+
+	return nr_promoted;
+}
+
 static unsigned long reclaim_or_migrate_folios(struct list_head *folio_list,
 		unsigned int (*handler)(struct list_head *, struct pglist_data *))
 {
@@ -2230,6 +2350,11 @@ unsigned long demote_pages(struct list_head *folio_list)
 	return reclaim_or_migrate_folios(folio_list, do_demote_folio_list);
 }
 
+unsigned long promote_pages(struct list_head *folio_list)
+{
+	return reclaim_or_migrate_folios(folio_list, do_promote_folio_list);
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct lruvec *lruvec, struct scan_control *sc)
 {

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 359460deb377..c703abdb8137 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1282,6 +1282,7 @@ const char * const vmstat_text[] = {
 	"pgdemote_kswapd",
 	"pgdemote_direct",
 	"pgdemote_khugepaged",
+	"pgpromote",
 	"pgscan_kswapd",
 	"pgscan_direct",
 	"pgscan_khugepaged",
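With the series applied, the counter added to vmstat_text[] above shows
up in /proc/vmstat, which gives a quick way to confirm that a promote
scheme is actually migrating pages.  A small userspace check, assuming
only that the counter is named "pgpromote" as in the hunk above:

    /* Print the pgpromote and pgdemote_* counters from /proc/vmstat to
     * verify that DAMOS_PROMOTE/DAMOS_DEMOTE schemes migrate pages. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *fp = fopen("/proc/vmstat", "r");

            if (!fp) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), fp)) {
                    if (!strncmp(line, "pgpromote", 9) ||
                        !strncmp(line, "pgdemote_", 9))
                            fputs(line, stdout);
            }
            fclose(fp);
            return 0;
    }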