diff mbox series

[RFC,39/43] shmem: optimize adding pages to the LRU in shmem_insert_pages()

Message ID 1588812129-8596-40-git-send-email-anthony.yznaga@oracle.com (mailing list archive)
State New, archived
Headers show
Series PKRAM: Preserved-over-Kexec RAM | expand

Commit Message

Anthony Yznaga May 7, 2020, 12:42 a.m. UTC
Reduce LRU lock contention when inserting shmem pages: instead of adding
each page to the LRU individually, stage pages destined for the same LRU
in an lru_splice and then add the accumulated batch to the LRU list en
masse.

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 mm/shmem.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index ca5edf580f24..678a396ba8d3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -789,9 +789,12 @@  int shmem_insert_pages(struct mm_struct *mm, struct inode *inode, pgoff_t index,
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	struct mem_cgroup *memcg;
+	struct lru_splice splice;
 	int i, err;
 	int nr = 0;
 
+	memset(&splice, 0, sizeof(splice));
+
 	for (i = 0; i < npages; i++)
 		nr += compound_nr(pages[i]);
 
@@ -866,7 +869,7 @@  int shmem_insert_pages(struct mm_struct *mm, struct inode *inode, pgoff_t index,
 		}
 
 		if (!PageLRU(pages[i]))
-			lru_cache_add_anon(pages[i]);
+			lru_splice_add_anon(pages[i], &splice);
 
 		flush_dcache_page(pages[i]);
 		SetPageUptodate(pages[i]);
@@ -875,6 +878,9 @@  int shmem_insert_pages(struct mm_struct *mm, struct inode *inode, pgoff_t index,
 		unlock_page(pages[i]);
 	}
 
+	if (splice.pgdat)
+		add_splice_to_lru_list(&splice);
+
 	return 0;
 
 out_truncate: