
[v4,6/6] mm,thp: avoid writes to file with THP in pagecache

Message ID 20190620172752.3300742-7-songliubraving@fb.com
State New, archived
Series Enable THP for text section of non-shmem files

Commit Message

Song Liu June 20, 2019, 5:27 p.m. UTC
In the previous patch, an application could put part of its text section
in THP via madvise(). These THPs are protected from writes while the
application is running (TXTBSY). However, once the application exits,
the file becomes writable again.

This patch avoids writes to file-backed THPs by dropping the page cache
for the file when the last vma with VM_DENYWRITE is removed. A new
counter, nr_thps, is added to struct address_space. In exit_mmap(), if
nr_thps is non-zero, we drop the page cache for the whole file.
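
For illustration, a minimal userspace sketch of the usage this series
enables (hypothetical: it assumes the earlier patches of the series are
applied, and that the default linker script provides __executable_start
and _etext):

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

extern char __executable_start[];	/* image start, per default linker script */
extern char _etext[];			/* end of the text section */

int main(void)
{
	const uintptr_t hpage = 2UL << 20;	/* assume 2MB THPs */
	uintptr_t start, end;

	/* Only the 2MB-aligned part of the text section can collapse. */
	start = ((uintptr_t)__executable_start + hpage - 1) & ~(hpage - 1);
	end = (uintptr_t)_etext & ~(hpage - 1);

	/* Ask khugepaged to collapse this range into THPs. */
	if (end > start && madvise((void *)start, end - start, MADV_HUGEPAGE))
		perror("madvise");

	pause();	/* keep the mapping alive so khugepaged can run */
	return 0;
}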

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 fs/inode.c         |  3 +++
 include/linux/fs.h | 31 +++++++++++++++++++++++++++++++
 mm/filemap.c       |  1 +
 mm/khugepaged.c    |  4 +++-
 mm/mmap.c          | 14 ++++++++++++++
 5 files changed, 52 insertions(+), 1 deletion(-)

Comments

Rik van Riel June 20, 2019, 5:42 p.m. UTC | #1
On Thu, 2019-06-20 at 10:27 -0700, Song Liu wrote:

> +++ b/mm/mmap.c
> @@ -3088,6 +3088,18 @@ int vm_brk(unsigned long addr, unsigned long len)
>  }
>  EXPORT_SYMBOL(vm_brk);
>  
> +static inline void release_file_thp(struct vm_area_struct *vma)
> +{
> +#ifdef CONFIG_READ_ONLY_THP_FOR_FS
> +	struct file *file = vma->vm_file;
> +
> +	if (file && (vma->vm_flags & VM_DENYWRITE) &&
> +	    atomic_read(&file_inode(file)->i_writecount) == 0 &&
> +	    filemap_nr_thps(file_inode(file)->i_mapping))
> +		truncate_pagecache(file_inode(file), 0);
> +#endif
> +}
> +
>  /* Release all mmaps. */
>  void exit_mmap(struct mm_struct *mm)
>  {
> @@ -3153,6 +3165,8 @@ void exit_mmap(struct mm_struct *mm)
>  	while (vma) {
>  		if (vma->vm_flags & VM_ACCOUNT)
>  			nr_accounted += vma_pages(vma);
> +
> +		release_file_thp(vma);
>  		vma = remove_vma(vma);
>  	}
>  	vm_unacct_memory(nr_accounted);

I like how you make the file accessible again to other
users, but am somewhat unsure about the mechanism used.

First, if multiple processes have the same file mmapped,
do you really want to blow away the page cache?

Secondly, by hooking into exit_mmap, you miss files that
get unmapped through munmap, so those are never made
writable again.

Would it be better to blow away the page cache when
the last mmap user unmaps it?

The page->mapping->i_mmap interval tree will be empty
when nobody has the file mmap()d.

Alternatively, open() could check whether the file is
currently mmapped, and blow away the page cache then.
That would leave the page cache intact if the same file 
gets execve()d several times in a row without any writes
in-between, which seems like it might be a relatively
common case.
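
For illustration, a rough sketch of the first alternative above: drop the
page cache once the i_mmap interval tree shows no mappers left. This is
not part of the posted patch; drop_file_thp_if_unmapped() is a
hypothetical helper, and its call site (the munmap() path, or open()) and
locking would need real scrutiny:

#include <linux/fs.h>		/* i_mmap_lock_read(), filemap_nr_thps() */
#include <linux/mm.h>		/* truncate_pagecache() */
#include <linux/rbtree.h>	/* RB_EMPTY_ROOT() */

static void drop_file_thp_if_unmapped(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	bool unmapped;

	if (!filemap_nr_thps(mapping))
		return;

	i_mmap_lock_read(mapping);
	/* Empty interval tree: nobody has the file mmap()ed any more. */
	unmapped = RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
	i_mmap_unlock_read(mapping);

	/* Racy against a concurrent mmap(); illustration only. */
	if (unmapped)
		truncate_pagecache(inode, 0);
}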

Patch

diff --git a/fs/inode.c b/fs/inode.c
index df6542ec3b88..518113a4e219 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -181,6 +181,9 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	mapping->flags = 0;
 	mapping->wb_err = 0;
 	atomic_set(&mapping->i_mmap_writable, 0);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_set(&mapping->nr_thps, 0);
+#endif
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->private_data = NULL;
 	mapping->writeback_index = 0;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f7fdfe93e25d..3edf4ee42eee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -444,6 +444,10 @@ struct address_space {
 	struct xarray		i_pages;
 	gfp_t			gfp_mask;
 	atomic_t		i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	/* number of thp, only for non-shmem files */
+	atomic_t		nr_thps;
+#endif
 	struct rb_root_cached	i_mmap;
 	struct rw_semaphore	i_mmap_rwsem;
 	unsigned long		nrpages;
@@ -2790,6 +2794,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
 	return errseq_sample(&mapping->wb_err);
 }
 
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	return atomic_read(&mapping->nr_thps);
+#else
+	return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_inc(&mapping->nr_thps);
+#else
+	WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_dec(&mapping->nr_thps);
+#else
+	WARN_ON_ONCE(1);
+#endif
+}
+
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
 			   int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
diff --git a/mm/filemap.c b/mm/filemap.c
index e79ceccdc6df..a8e86c136381 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -205,6 +205,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
 		__dec_node_page_state(page, NR_FILE_THPS);
+		filemap_nr_thps_dec(mapping);
 	}
 
 	/*
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fbcff5a1d65a..17ebe9da56ce 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1500,8 +1500,10 @@ static void collapse_file(struct vm_area_struct *vma,
 
 	if (is_shmem)
 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
-	else
+	else {
 		__inc_node_page_state(new_page, NR_FILE_THPS);
+		filemap_nr_thps_inc(mapping);
+	}
 
 	if (nr_none) {
 		struct zone *zone = page_zone(new_page);
diff --git a/mm/mmap.c b/mm/mmap.c
index 7e8c3e8ae75f..8094ce028d74 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3088,6 +3088,18 @@ int vm_brk(unsigned long addr, unsigned long len)
 }
 EXPORT_SYMBOL(vm_brk);
 
+static inline void release_file_thp(struct vm_area_struct *vma)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	struct file *file = vma->vm_file;
+
+	if (file && (vma->vm_flags & VM_DENYWRITE) &&
+	    atomic_read(&file_inode(file)->i_writecount) == 0 &&
+	    filemap_nr_thps(file_inode(file)->i_mapping))
+		truncate_pagecache(file_inode(file), 0);
+#endif
+}
+
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
 {
@@ -3153,6 +3165,8 @@ void exit_mmap(struct mm_struct *mm)
 	while (vma) {
 		if (vma->vm_flags & VM_ACCOUNT)
 			nr_accounted += vma_pages(vma);
+
+		release_file_thp(vma);
 		vma = remove_vma(vma);
 	}
 	vm_unacct_memory(nr_accounted);