diff mbox series

mm: vmscan: support equal reclaim for anon and file pages

Message ID c617a0c6cb2a7e3bc78998ad7e2bceb22df157c2.1610398598.git.sudaraja@codeaurora.org (mailing list archive)
State New, archived
Headers show
Series mm: vmscan: support equal reclaim for anon and file pages | expand

Commit Message

Sudarshan Rajagopalan Jan. 11, 2021, 8:58 p.m. UTC
When performing memory reclaim, support treating anonymous and
file backed pages equally.
Swapping anonymous pages out to swap space can be efficient enough
to justify treating anonymous and file backed pages equally.

Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
---
 mm/vmscan.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

Comments

Yang Shi Jan. 11, 2021, 9:21 p.m. UTC | #1
On Mon, Jan 11, 2021 at 12:59 PM Sudarshan Rajagopalan
<sudaraja@codeaurora.org> wrote:
>
> When performing memory reclaim support treating anonymous and
> file backed pages equally.
> Swapping anonymous pages out to memory can be efficient enough
> to justify treating anonymous and file backed pages equally.
>
> Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> ---
>  mm/vmscan.c | 15 +++++++++++++--
>  1 file changed, 13 insertions(+), 2 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 257cba79a96d..ec7585e0d5f5 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -169,6 +169,8 @@ struct scan_control {
>   */
>  int vm_swappiness = 60;
>
> +bool balance_anon_file_reclaim = false;

I think the same effect could be achieved by adjusting swappiness. The
"swappiness" can go to 200 now.

Please check the document at Documentation/admin-guide/sysctl/vm.rst, it says:

This control is used to define the rough relative IO cost of swapping
and filesystem paging, as a value between 0 and 200. At 100, the VM
assumes equal IO cost and will thus apply memory pressure to the page
cache and swap-backed pages equally; lower values signify more
expensive swap IO, higher values indicates cheaper.

> +
>  static void set_task_reclaim_state(struct task_struct *task,
>                                    struct reclaim_state *rs)
>  {
> @@ -201,6 +203,13 @@ static DECLARE_RWSEM(shrinker_rwsem);
>  static DEFINE_IDR(shrinker_idr);
>  static int shrinker_nr_max;
>
> +static int __init cmdline_parse_balance_reclaim(char *p)
> +{
> +       balance_anon_file_reclaim = true;
> +       return 0;
> +}
> +early_param("balance_reclaim", cmdline_parse_balance_reclaim);
> +
>  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
>         int id, ret = -ENOMEM;
> @@ -2291,9 +2300,11 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>
>         /*
>          * If there is enough inactive page cache, we do not reclaim
> -        * anything from the anonymous working right now.
> +        * anything from the anonymous working right now. But when balancing
> +        * anon and page cache files for reclaim, allow swapping of anon pages
> +        * even if there are a number of inactive file cache pages.
>          */
> -       if (sc->cache_trim_mode) {
> +       if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
>                 scan_balance = SCAN_FILE;
>                 goto out;
>         }
> --
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
> a Linux Foundation Collaborative Project
>
>
Michal Hocko Jan. 12, 2021, 7:03 a.m. UTC | #2
On Mon 11-01-21 12:58:43, Sudarshan Rajagopalan wrote:
> When performing memory reclaim support treating anonymous and
> file backed pages equally.
> Swapping anonymous pages out to memory can be efficient enough
> to justify treating anonymous and file backed pages equally.

This changelog doesn't explain, what kind of problem you are trying to
address, why the existing code and tunables are insufficient, how it is
supposed to work, what are potential risks, what kind of testing have
you done and more.

Please have a look at Documentation/process/submitting-patches.rst for
more guidance about expectations from a patch when it is submitted.
 
> Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> ---
>  mm/vmscan.c | 15 +++++++++++++--
>  1 file changed, 13 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 257cba79a96d..ec7585e0d5f5 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -169,6 +169,8 @@ struct scan_control {
>   */
>  int vm_swappiness = 60;
>  
> +bool balance_anon_file_reclaim = false;
> +
>  static void set_task_reclaim_state(struct task_struct *task,
>  				   struct reclaim_state *rs)
>  {
> @@ -201,6 +203,13 @@ static DECLARE_RWSEM(shrinker_rwsem);
>  static DEFINE_IDR(shrinker_idr);
>  static int shrinker_nr_max;
>  
> +static int __init cmdline_parse_balance_reclaim(char *p)
> +{
> +	balance_anon_file_reclaim = true;
> +	return 0;
> +}
> +early_param("balance_reclaim", cmdline_parse_balance_reclaim);
> +
>  static int prealloc_memcg_shrinker(struct shrinker *shrinker)
>  {
>  	int id, ret = -ENOMEM;
> @@ -2291,9 +2300,11 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
>  
>  	/*
>  	 * If there is enough inactive page cache, we do not reclaim
> -	 * anything from the anonymous working right now.
> +	 * anything from the anonymous working right now. But when balancing
> +	 * anon and page cache files for reclaim, allow swapping of anon pages
> +	 * even if there are a number of inactive file cache pages.
>  	 */
> -	if (sc->cache_trim_mode) {
> +	if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
>  		scan_balance = SCAN_FILE;
>  		goto out;
>  	}
> -- 
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
> a Linux Foundation Collaborative Project
>
diff mbox series

Patch

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 257cba79a96d..ec7585e0d5f5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,6 +169,8 @@  struct scan_control {
  */
 int vm_swappiness = 60;
 
+bool balance_anon_file_reclaim = false;
+
 static void set_task_reclaim_state(struct task_struct *task,
 				   struct reclaim_state *rs)
 {
@@ -201,6 +203,13 @@  static DECLARE_RWSEM(shrinker_rwsem);
 static DEFINE_IDR(shrinker_idr);
 static int shrinker_nr_max;
 
+static int __init cmdline_parse_balance_reclaim(char *p)
+{
+	balance_anon_file_reclaim = true;
+	return 0;
+}
+early_param("balance_reclaim", cmdline_parse_balance_reclaim);
+
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
 	int id, ret = -ENOMEM;
@@ -2291,9 +2300,11 @@  static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 
 	/*
 	 * If there is enough inactive page cache, we do not reclaim
-	 * anything from the anonymous working right now.
+	 * anything from the anonymous working right now. But when balancing
+	 * anon and page cache files for reclaim, allow swapping of anon pages
+	 * even if there are a number of inactive file cache pages.
 	 */
-	if (sc->cache_trim_mode) {
+	if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
 		scan_balance = SCAN_FILE;
 		goto out;
 	}