diff mbox

[RFC,2/7] init: add set_ro_mostly_after_init_rw/ro function

Message ID 1487498660-16600-2-git-send-email-hoeun.ryu@gmail.com (mailing list archive)
State New, archived
Headers show

Commit Message

Hoeun Ryu Feb. 19, 2017, 10:04 a.m. UTC
Add a set_ro_mostly_after_init_rw/ro pair to modify the memory attributes of
memory marked as `ro_mostly_after_init`.

 I am doubtful whether this is the right place for these functions to reside,
and whether they are suitable for all architectures' memory attribute
modification. Please comment.

Signed-off-by: Hoeun Ryu <hoeun.ryu@gmail.com>
---
 include/linux/init.h |  6 ++++++
 init/main.c          | 24 ++++++++++++++++++++++++
 2 files changed, 30 insertions(+)

Comments

Mark Rutland Feb. 20, 2017, 10:22 a.m. UTC | #1
On Sun, Feb 19, 2017 at 07:04:05PM +0900, Hoeun Ryu wrote:
>  Add set_ro_mostly_after_init_rw/ro pair to modify memory attributes for
> memory marked as `ro_mostly_after_init`.
> 
>  I am doubtful that this is the right place where these functions reside and
> these functions are suitable for all architectures for memory attributes
> modification. Please comment.

These won't work for arm64, since set_memory_* only work on
page-granular mappings in the vmalloc area.

The "real" kernel mappings can use larger block mappings, and would need
to be split (which cannot be done at runtime) before permissions could
be changed at page granularity.

Thanks,
Mark.

> Signed-off-by: Hoeun Ryu <hoeun.ryu@gmail.com>
> ---
>  include/linux/init.h |  6 ++++++
>  init/main.c          | 24 ++++++++++++++++++++++++
>  2 files changed, 30 insertions(+)
> 
> diff --git a/include/linux/init.h b/include/linux/init.h
> index 79af096..d68e4f7 100644
> --- a/include/linux/init.h
> +++ b/include/linux/init.h
> @@ -131,6 +131,12 @@ extern bool rodata_enabled;
>  #endif
>  #ifdef CONFIG_STRICT_KERNEL_RWX
>  void mark_rodata_ro(void);
> +
> +void set_ro_mostly_after_init_rw(void);
> +void set_ro_mostly_after_init_ro(void);
> +#else
> +static inline void set_ro_mostly_after_init_rw(void) { }
> +static inline void set_ro_mostly_after_init_ro(void) { }
>  #endif
>  
>  extern void (*late_time_init)(void);
> diff --git a/init/main.c b/init/main.c
> index 4719abf..a5d4873 100644
> --- a/init/main.c
> +++ b/init/main.c
> @@ -941,6 +941,30 @@ static void mark_readonly(void)
>  	} else
>  		pr_info("Kernel memory protection disabled.\n");
>  }
> +
> +void set_ro_mostly_after_init_rw(void)
> +{
> +	unsigned long start = PFN_ALIGN(__start_data_ro_mostly_after_init);
> +	unsigned long end = PFN_ALIGN(&__end_data_ro_mostly_after_init);
> +	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
> +
> +	if (!rodata_enabled)
> +		return;
> +
> +	set_memory_rw(start, nr_pages);
> +}
> +
> +void set_ro_mostly_after_init_ro(void)
> +{
> +	unsigned long start = PFN_ALIGN(__start_data_ro_mostly_after_init);
> +	unsigned long end = PFN_ALIGN(&__end_data_ro_mostly_after_init);
> +	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
> +
> +	if (!rodata_enabled)
> +		return;
> +
> +	set_memory_ro(start, nr_pages);
> +}
>  #else
>  static inline void mark_readonly(void)
>  {
> -- 
> 2.7.4
>
Hoeun Ryu Feb. 21, 2017, 6:33 a.m. UTC | #2
> On 20 Feb 2017, at 7:22 PM, Mark Rutland <mark.rutland@arm.com> wrote:
> 
> On Sun, Feb 19, 2017 at 07:04:05PM +0900, Hoeun Ryu wrote:
>> Add set_ro_mostly_after_init_rw/ro pair to modify memory attributes for
>> memory marked as `ro_mostly_after_init`.
>> 
>> I am doubtful that this is the right place where these functions reside and
>> these functions are suitable for all architectures for memory attributes
>> modification. Please comment.
> 
> These won't work for arm64, since set_memory_* only work on
> page-granular mappings in the vmalloc area.
> 
> The "real" kernel mappings can use larger block mappings, and would need
> to be split (which cannot be done at runtime) before permissions could
> be changed at page granularity.

So I sent RFC 6/7 [1] and 7/7 [2], which split the block mappings to page granularity.
I think you and Ard Biesheuvel don’t like it anyway.

[1] : https://lkml.org/lkml/2017/2/19/38
[2] : https://lkml.org/lkml/2017/2/19/39

> 
> Thanks,
> Mark.
> 
>> Signed-off-by: Hoeun Ryu <hoeun.ryu@gmail.com>
>> ---
>> include/linux/init.h |  6 ++++++
>> init/main.c          | 24 ++++++++++++++++++++++++
>> 2 files changed, 30 insertions(+)
>> 
>> diff --git a/include/linux/init.h b/include/linux/init.h
>> index 79af096..d68e4f7 100644
>> --- a/include/linux/init.h
>> +++ b/include/linux/init.h
>> @@ -131,6 +131,12 @@ extern bool rodata_enabled;
>> #endif
>> #ifdef CONFIG_STRICT_KERNEL_RWX
>> void mark_rodata_ro(void);
>> +
>> +void set_ro_mostly_after_init_rw(void);
>> +void set_ro_mostly_after_init_ro(void);
>> +#else
>> +static inline void set_ro_mostly_after_init_rw(void) { }
>> +static inline void set_ro_mostly_after_init_ro(void) { }
>> #endif
>> 
>> extern void (*late_time_init)(void);
>> diff --git a/init/main.c b/init/main.c
>> index 4719abf..a5d4873 100644
>> --- a/init/main.c
>> +++ b/init/main.c
>> @@ -941,6 +941,30 @@ static void mark_readonly(void)
>> 	} else
>> 		pr_info("Kernel memory protection disabled.\n");
>> }
>> +
>> +void set_ro_mostly_after_init_rw(void)
>> +{
>> +	unsigned long start = PFN_ALIGN(__start_data_ro_mostly_after_init);
>> +	unsigned long end = PFN_ALIGN(&__end_data_ro_mostly_after_init);
>> +	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
>> +
>> +	if (!rodata_enabled)
>> +		return;
>> +
>> +	set_memory_rw(start, nr_pages);
>> +}
>> +
>> +void set_ro_mostly_after_init_ro(void)
>> +{
>> +	unsigned long start = PFN_ALIGN(__start_data_ro_mostly_after_init);
>> +	unsigned long end = PFN_ALIGN(&__end_data_ro_mostly_after_init);
>> +	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
>> +
>> +	if (!rodata_enabled)
>> +		return;
>> +
>> +	set_memory_ro(start, nr_pages);
>> +}
>> #else
>> static inline void mark_readonly(void)
>> {
>> -- 
>> 2.7.4
>>
diff mbox

Patch

diff --git a/include/linux/init.h b/include/linux/init.h
index 79af096..d68e4f7 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -131,6 +131,12 @@  extern bool rodata_enabled;
 #endif
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mark_rodata_ro(void);
+
+void set_ro_mostly_after_init_rw(void);
+void set_ro_mostly_after_init_ro(void);
+#else
+static inline void set_ro_mostly_after_init_rw(void) { }
+static inline void set_ro_mostly_after_init_ro(void) { }
 #endif
 
 extern void (*late_time_init)(void);
diff --git a/init/main.c b/init/main.c
index 4719abf..a5d4873 100644
--- a/init/main.c
+++ b/init/main.c
@@ -941,6 +941,30 @@  static void mark_readonly(void)
 	} else
 		pr_info("Kernel memory protection disabled.\n");
 }
+
+void set_ro_mostly_after_init_rw(void)
+{
+	unsigned long start = PFN_ALIGN(__start_data_ro_mostly_after_init);
+	unsigned long end = PFN_ALIGN(&__end_data_ro_mostly_after_init);
+	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
+
+	if (!rodata_enabled)
+		return;
+
+	set_memory_rw(start, nr_pages);
+}
+
+void set_ro_mostly_after_init_ro(void)
+{
+	unsigned long start = PFN_ALIGN(__start_data_ro_mostly_after_init);
+	unsigned long end = PFN_ALIGN(&__end_data_ro_mostly_after_init);
+	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
+
+	if (!rodata_enabled)
+		return;
+
+	set_memory_ro(start, nr_pages);
+}
 #else
 static inline void mark_readonly(void)
 {