[v7,3/7] set_memory: allow set_direct_map_*_noflush() for multiple pages

Message ID 20201026083752.13267-4-rppt@kernel.org (mailing list archive)
State New, archived
Series mm: introduce memfd_secret system call to create "secret" memory areas

Commit Message

Mike Rapoport Oct. 26, 2020, 8:37 a.m. UTC
From: Mike Rapoport <rppt@linux.ibm.com>

The underlying implementations of set_direct_map_invalid_noflush() and
set_direct_map_default_noflush() allow updating multiple contiguous pages
at once.

Add a numpages parameter to set_direct_map_*_noflush() to expose this
ability through these APIs.
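
For example, a caller that needs to drop a whole contiguous allocation
from the direct map can then do it with a single call and a single TLB
flush rather than a per-page loop. A minimal sketch (the helper below is
hypothetical and not part of this patch):

	/*
	 * Hypothetical caller: remove nr_pages contiguous pages from the
	 * direct map. Needs <linux/set_memory.h>, <linux/mm.h> and
	 * <asm/tlbflush.h>. The _noflush variants leave TLB maintenance
	 * to the caller, so flush the whole range once at the end.
	 */
	static int unmap_range_from_direct_map(struct page *page, int nr_pages)
	{
		unsigned long addr = (unsigned long)page_address(page);
		int err;

		err = set_direct_map_invalid_noflush(page, nr_pages);
		if (err)
			return err;

		flush_tlb_kernel_range(addr, addr + nr_pages * PAGE_SIZE);
		return 0;
	}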

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
 arch/arm64/include/asm/cacheflush.h |  4 ++--
 arch/arm64/mm/pageattr.c            | 10 ++++++----
 arch/riscv/include/asm/set_memory.h |  4 ++--
 arch/riscv/mm/pageattr.c            |  8 ++++----
 arch/x86/include/asm/set_memory.h   |  4 ++--
 arch/x86/mm/pat/set_memory.c        |  8 ++++----
 include/linux/set_memory.h          |  4 ++--
 mm/vmalloc.c                        |  5 +++--
 8 files changed, 25 insertions(+), 22 deletions(-)

Comments

Edgecombe, Rick P Oct. 26, 2020, 7:01 p.m. UTC | #1
On Mon, 2020-10-26 at 10:37 +0200, Mike Rapoport wrote:
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -2184,14 +2184,14 @@ static int __set_pages_np(struct page *page, int numpages)
>         return __change_page_attr_set_clr(&cpa, 0);
>  }
>  
> -int set_direct_map_invalid_noflush(struct page *page)
> +int set_direct_map_invalid_noflush(struct page *page, int numpages)
>  {
> -       return __set_pages_np(page, 1);
> +       return __set_pages_np(page, numpages);
>  }
>  
> -int set_direct_map_default_noflush(struct page *page)
> +int set_direct_map_default_noflush(struct page *page, int numpages)
>  {
> -       return __set_pages_p(page, 1);
> +       return __set_pages_p(page, numpages);
>  }

Somewhat related to your other series, this could result in large NP
pages and trip up hibernate.
David Hildenbrand Oct. 27, 2020, 8:12 a.m. UTC | #2
On 26.10.20 20:01, Edgecombe, Rick P wrote:
> On Mon, 2020-10-26 at 10:37 +0200, Mike Rapoport wrote:
>> +++ b/arch/x86/mm/pat/set_memory.c
>> @@ -2184,14 +2184,14 @@ static int __set_pages_np(struct page *page, int numpages)
>>         return __change_page_attr_set_clr(&cpa, 0);
>>  }
>>  
>> -int set_direct_map_invalid_noflush(struct page *page)
>> +int set_direct_map_invalid_noflush(struct page *page, int numpages)
>>  {
>> -       return __set_pages_np(page, 1);
>> +       return __set_pages_np(page, numpages);
>>  }
>>  
>> -int set_direct_map_default_noflush(struct page *page)
>> +int set_direct_map_default_noflush(struct page *page, int numpages)
>>  {
>> -       return __set_pages_p(page, 1);
>> +       return __set_pages_p(page, numpages);
>>  }
> 
> Somewhat related to your other series, this could result in large NP
> pages and trip up hibernate.
> 

It feels somewhat desirable to disable hibernation once secretmem is
enabled, right? Otherwise you'll be writing out your secrets to swap,
where they will remain even after booting up again ...

Skipping secretmem pages when hibernating is the wrong approach I guess ...
Mike Rapoport Oct. 27, 2020, 9:48 a.m. UTC | #3
On Tue, Oct 27, 2020 at 09:12:23AM +0100, David Hildenbrand wrote:
> On 26.10.20 20:01, Edgecombe, Rick P wrote:
> > On Mon, 2020-10-26 at 10:37 +0200, Mike Rapoport wrote:
> >> +++ b/arch/x86/mm/pat/set_memory.c
> >> @@ -2184,14 +2184,14 @@ static int __set_pages_np(struct page *page, int numpages)
> >>         return __change_page_attr_set_clr(&cpa, 0);
> >>  }
> >>  
> >> -int set_direct_map_invalid_noflush(struct page *page)
> >> +int set_direct_map_invalid_noflush(struct page *page, int numpages)
> >>  {
> >> -       return __set_pages_np(page, 1);
> >> +       return __set_pages_np(page, numpages);
> >>  }
> >>  
> >> -int set_direct_map_default_noflush(struct page *page)
> >> +int set_direct_map_default_noflush(struct page *page, int numpages)
> >>  {
> >> -       return __set_pages_p(page, 1);
> >> +       return __set_pages_p(page, numpages);
> >>  }
> > 
> > Somewhat related to your other series, this could result in large NP
> > pages and trip up hibernate.
> > 
> 
> It feels somewhat desirable to disable hibernation once secretmem is
> enabled, right? Otherwise you'll be writing out your secrets to swap,
> where they will remain even after booting up again ...
> 
> Skipping secretmem pages when hibernating is the wrong approach I guess ...

Completely agree.
I'll look into preventing hibernation from touching secretmem.
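
One possible way to do that (just a sketch of the idea, not something
this series implements) is to keep a count of active secretmem users and
teach the hibernation core to refuse to hibernate while it is non-zero:

	/* sketch, in mm/secretmem.c: refcount of active secretmem users */
	static atomic_t secretmem_users;

	bool secretmem_active(void)
	{
		return !!atomic_read(&secretmem_users);
	}

	/* sketch, in kernel/power/hibernate.c */
	bool hibernation_available(void)
	{
		return nohibernate == 0 && !secretmem_active();
	}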

Patch

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 9384fd8fc13c..831739bc93a6 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -138,8 +138,8 @@  static __always_inline void __flush_icache_all(void)
 
 int set_memory_valid(unsigned long addr, int numpages, int enable);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 
 #include <asm-generic/cacheflush.h>
 
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 1b94f5b82654..2d4e8c4cdab5 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -148,34 +148,36 @@  int set_memory_valid(unsigned long addr, int numpages, int enable)
 					__pgprot(PTE_VALID));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
 	struct page_change_data data = {
 		.set_mask = __pgprot(0),
 		.clear_mask = __pgprot(PTE_VALID),
 	};
+	unsigned long size = PAGE_SIZE * numpages;
 
 	if (!rodata_full)
 		return 0;
 
 	return apply_to_page_range(&init_mm,
 				   (unsigned long)page_address(page),
-				   PAGE_SIZE, change_page_range, &data);
+				   size, change_page_range, &data);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
 	struct page_change_data data = {
 		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
 		.clear_mask = __pgprot(PTE_RDONLY),
 	};
+	unsigned long size = PAGE_SIZE * numpages;
 
 	if (!rodata_full)
 		return 0;
 
 	return apply_to_page_range(&init_mm,
 				   (unsigned long)page_address(page),
-				   PAGE_SIZE, change_page_range, &data);
+				   size, change_page_range, &data);
 }
 
 void __kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 4c5bae7ca01c..e20f1bef9b11 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -22,8 +22,8 @@  static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 19fecb362d81..58743bb6b755 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -150,11 +150,11 @@  int set_memory_nx(unsigned long addr, int numpages)
 	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
 	int ret;
 	unsigned long start = (unsigned long)page_address(page);
-	unsigned long end = start + PAGE_SIZE;
+	unsigned long end = start + PAGE_SIZE * numpages;
 	struct pageattr_masks masks = {
 		.set_mask = __pgprot(0),
 		.clear_mask = __pgprot(_PAGE_PRESENT)
@@ -167,11 +167,11 @@  int set_direct_map_invalid_noflush(struct page *page)
 	return ret;
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
 	int ret;
 	unsigned long start = (unsigned long)page_address(page);
-	unsigned long end = start + PAGE_SIZE;
+	unsigned long end = start + PAGE_SIZE * numpages;
 	struct pageattr_masks masks = {
 		.set_mask = PAGE_KERNEL,
 		.clear_mask = __pgprot(0)
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 5948218f35c5..2c5fb6b338e7 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -80,8 +80,8 @@  int set_pages_wb(struct page *page, int numpages);
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 
 extern int kernel_set_to_readonly;
 
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 40baa90e74f4..239bdddf6f96 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2184,14 +2184,14 @@  static int __set_pages_np(struct page *page, int numpages)
 	return __change_page_attr_set_clr(&cpa, 0);
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
-	return __set_pages_np(page, 1);
+	return __set_pages_np(page, numpages);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
-	return __set_pages_p(page, 1);
+	return __set_pages_p(page, numpages);
 }
 
 void __kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 860e0f843c12..a938a3775082 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -15,11 +15,11 @@  static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
 #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
-static inline int set_direct_map_invalid_noflush(struct page *page)
+static inline int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
 	return 0;
 }
-static inline int set_direct_map_default_noflush(struct page *page)
+static inline int set_direct_map_default_noflush(struct page *page, int numpages)
 {
 	return 0;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6ae491a8b210..670fc20ad44c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2179,13 +2179,14 @@  struct vm_struct *remove_vm_area(const void *addr)
 }
 
 static inline void set_area_direct_map(const struct vm_struct *area,
-				       int (*set_direct_map)(struct page *page))
+				       int (*set_direct_map)(struct page *page,
+							     int numpages))
 {
 	int i;
 
 	for (i = 0; i < area->nr_pages; i++)
 		if (page_address(area->pages[i]))
-			set_direct_map(area->pages[i]);
+			set_direct_map(area->pages[i], 1);
 }
 
 /* Handle removing and resetting vm mappings related to the vm_struct. */