
[07/29] lustre: readahead: limit over reservation

Message ID 1619381316-7719-8-git-send-email-jsimmons@infradead.org (mailing list archive)
State New, archived
Series lustre: Update to OpenSFS tree as of April 25, 2020

Commit Message

James Simmons April 25, 2021, 8:08 p.m. UTC
From: Wang Shilong <wshilong@ddn.com>

For performance reasons, exceeding @ra_max_pages is allowed in
order to cover the current read window, but the excess should be
limited to the RPC size in case a large block size read is
issued. Trim to the RPC boundary.

Otherwise, too many readahead pages might be issued, leaving
the client short of LRU pages.
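
To illustrate the arithmetic, a minimal standalone sketch of the trim
follows; ras_rpc_pages, ria_start_idx and pages_min mirror the
identifiers in the rw.c hunk below, and the numeric values are purely
illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long ras_rpc_pages = 256;  /* e.g. 1 MiB RPC with 4 KiB pages */
	unsigned long ria_start_idx = 1224; /* page index where the window starts */
	unsigned long pages_min = 512;      /* pages needed to cover the window */
	unsigned long boundary;

	/* pages left until the next RPC boundary from the window start */
	boundary = ras_rpc_pages - (ria_start_idx % ras_rpc_pages);

	/* cap the over-reservation at that boundary */
	if (pages_min > boundary)
		pages_min = boundary;

	/* 1224 % 256 = 200, so at most 56 pages may exceed ra_max_pages */
	printf("pages_min trimmed to %lu\n", pages_min);
	return 0;
}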

Fixes: 35b7c43c21 ("lustre: llite: allow current readahead to exceed reservation")
WC-bug-id: https://jira.whamcloud.com/browse/LU-12142
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Reviewed-on: https://review.whamcloud.com/42060
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
---
 fs/lustre/llite/lproc_llite.c | 10 ++++++++--
 fs/lustre/llite/rw.c          |  8 ++++++++
 2 files changed, 16 insertions(+), 2 deletions(-)
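
For reference, with the lproc_llite.c hunk applied, the llite
max_cached_mb stats file (e.g. via lctl get_param llite.*.max_cached_mb)
also reports the readahead limits; a hypothetical example of the
extended output, with all values purely illustrative:

users: 1
max_cached_mb: 7392
used_mb: 0
unused_mb: 7392
reclaim_count: 0
max_read_ahead_mb: 1024
used_read_ahead_mb: 0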

Patch

diff --git a/fs/lustre/llite/lproc_llite.c b/fs/lustre/llite/lproc_llite.c
index ec241a4..4ce6fab 100644
--- a/fs/lustre/llite/lproc_llite.c
+++ b/fs/lustre/llite/lproc_llite.c
@@ -455,6 +455,7 @@  static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	struct super_block *sb = m->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct cl_client_cache *cache = sbi->ll_cache;
+	struct ll_ra_info *ra = &sbi->ll_ra_info;
 	long max_cached_mb;
 	long unused_mb;
 
@@ -462,17 +463,22 @@  static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
 	unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
 	mutex_unlock(&cache->ccc_max_cache_mb_lock);
+
 	seq_printf(m,
 		   "users: %d\n"
 		   "max_cached_mb: %ld\n"
 		   "used_mb: %ld\n"
 		   "unused_mb: %ld\n"
-		   "reclaim_count: %u\n",
+		   "reclaim_count: %u\n"
+		   "max_read_ahead_mb: %lu\n"
+		   "used_read_ahead_mb: %d\n",
 		   refcount_read(&cache->ccc_users),
 		   max_cached_mb,
 		   max_cached_mb - unused_mb,
 		   unused_mb,
-		   cache->ccc_lru_shrinkers);
+		   cache->ccc_lru_shrinkers,
+		   PAGES_TO_MiB(ra->ra_max_pages),
+		   PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
 	return 0;
 }
 
diff --git a/fs/lustre/llite/rw.c b/fs/lustre/llite/rw.c
index 8bba97f..2d08767 100644
--- a/fs/lustre/llite/rw.c
+++ b/fs/lustre/llite/rw.c
@@ -788,6 +788,14 @@  static int ll_readahead(const struct lu_env *env, struct cl_io *io,
 			vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
 		pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
 			    ria->ria_start_idx;
+		/**
+		 * For performance reasons, exceeding @ra_max_pages
+		 * is allowed, but the excess should be limited to
+		 * the RPC size in case a large block size read is
+		 * issued. Trim to the RPC boundary.
+		 */
+		pages_min = min(pages_min, ras->ras_rpc_pages -
+				(ria->ria_start_idx % ras->ras_rpc_pages));
 	}
 
 	/* don't over reserved for mmap range read */