
[v6,1/5] powerpc/mm: Implement set_memory() routines

Message ID 20191224055545.178462-2-ruscur@russell.cc (mailing list archive)
State New, archived
Series Implement STRICT_MODULE_RWX for powerpc

Commit Message

Russell Currey Dec. 24, 2019, 5:55 a.m. UTC
The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
and are generally useful primitives to have.  This implementation is
designed to be completely generic across powerpc's many MMUs.

It's possible that this could be optimised to be faster for specific
MMUs, but the focus is on having a generic and safe implementation for
now.

This implementation does not handle cases where the caller is attempting
to change the mapping of the page it is executing from, or if another
CPU is concurrently using the page being altered.  These cases likely
shouldn't happen, but a more complex implementation with MMU-specific code
could safely handle them, so that is left as a TODO for now.

Signed-off-by: Russell Currey <ruscur@russell.cc>
---
 arch/powerpc/Kconfig                  |  1 +
 arch/powerpc/include/asm/set_memory.h | 32 +++++++++++
 arch/powerpc/mm/Makefile              |  1 +
 arch/powerpc/mm/pageattr.c            | 83 +++++++++++++++++++++++++++
 4 files changed, 117 insertions(+)
 create mode 100644 arch/powerpc/include/asm/set_memory.h
 create mode 100644 arch/powerpc/mm/pageattr.c

Comments

Christophe Leroy Jan. 8, 2020, 12:52 p.m. UTC | #1
On 24/12/2019 at 06:55, Russell Currey wrote:
> The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
> and are generally useful primitives to have.  This implementation is
> designed to be completely generic across powerpc's many MMUs.
> 
> It's possible that this could be optimised to be faster for specific
> MMUs, but the focus is on having a generic and safe implementation for
> now.
> 
> This implementation does not handle cases where the caller is attempting
> to change the mapping of the page it is executing from, or if another
> CPU is concurrently using the page being altered.  These cases likely
> shouldn't happen, but a more complex implementation with MMU-specific code
> could safely handle them, so that is left as a TODO for now.
> 
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> ---
>   arch/powerpc/Kconfig                  |  1 +
>   arch/powerpc/include/asm/set_memory.h | 32 +++++++++++
>   arch/powerpc/mm/Makefile              |  1 +
>   arch/powerpc/mm/pageattr.c            | 83 +++++++++++++++++++++++++++
>   4 files changed, 117 insertions(+)
>   create mode 100644 arch/powerpc/include/asm/set_memory.h
>   create mode 100644 arch/powerpc/mm/pageattr.c
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 1ec34e16ed65..f0b9b47b5353 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -133,6 +133,7 @@ config PPC
>   	select ARCH_HAS_PTE_SPECIAL
>   	select ARCH_HAS_MEMBARRIER_CALLBACKS
>   	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
> +	select ARCH_HAS_SET_MEMORY
>   	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
>   	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
>   	select ARCH_HAS_UACCESS_FLUSHCACHE
> diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
> new file mode 100644
> index 000000000000..5230ddb2fefd
> --- /dev/null
> +++ b/arch/powerpc/include/asm/set_memory.h
> @@ -0,0 +1,32 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_POWERPC_SET_MEMORY_H
> +#define _ASM_POWERPC_SET_MEMORY_H
> +
> +#define SET_MEMORY_RO	1
> +#define SET_MEMORY_RW	2
> +#define SET_MEMORY_NX	3
> +#define SET_MEMORY_X	4

Maybe going from 0 to 3 would be better than 1 to 4
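i.e. something like:

	#define SET_MEMORY_RO	0
	#define SET_MEMORY_RW	1
	#define SET_MEMORY_NX	2
	#define SET_MEMORY_X	3

That way the values could also double as array indices, should that ever 
be useful.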

> +
> +int change_memory_attr(unsigned long addr, int numpages, int action);

action could be unsigned.

> +
> +static inline int set_memory_ro(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
> +}
> +
> +static inline int set_memory_rw(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
> +}
> +
> +static inline int set_memory_nx(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
> +}
> +
> +static inline int set_memory_x(unsigned long addr, int numpages)
> +{
> +	return change_memory_attr(addr, numpages, SET_MEMORY_X);
> +}
> +
> +#endif
> diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
> index 5e147986400d..d0a0bcbc9289 100644
> --- a/arch/powerpc/mm/Makefile
> +++ b/arch/powerpc/mm/Makefile
> @@ -20,3 +20,4 @@ obj-$(CONFIG_HIGHMEM)		+= highmem.o
>   obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
>   obj-$(CONFIG_PPC_PTDUMP)	+= ptdump/
>   obj-$(CONFIG_KASAN)		+= kasan/
> +obj-$(CONFIG_ARCH_HAS_SET_MEMORY) += pageattr.o

CONFIG_ARCH_HAS_SET_MEMORY is set unconditionally, so I think you should 
add pageattr.o to obj-y instead. CONFIG_ARCH_HAS_XXX options are almost 
never used in Makefiles.

> diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
> new file mode 100644
> index 000000000000..15d5fb04f531
> --- /dev/null
> +++ b/arch/powerpc/mm/pageattr.c
> @@ -0,0 +1,83 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +/*
> + * MMU-generic set_memory implementation for powerpc
> + *
> + * Copyright 2019, IBM Corporation.
> + */
> +
> +#include <linux/mm.h>
> +#include <linux/set_memory.h>
> +
> +#include <asm/mmu.h>
> +#include <asm/page.h>
> +#include <asm/pgtable.h>
> +
> +
> +/*
> + * Updates the attributes of a page in three steps:
> + *
> + * 1. invalidate the page table entry
> + * 2. flush the TLB
> + * 3. install the new entry with the updated attributes
> + *
> + * This is unsafe if the caller is attempting to change the mapping of the
> + * page it is executing from, or if another CPU is concurrently using the
> + * page being altered.
> + *
> + * TODO make the implementation resistant to this.
> + */
> +static int __change_page_attr(pte_t *ptep, unsigned long addr, void *data)
> +{
> +	int action = *((int *)data);

Don't use pointers for such simple things; pointers force the compiler to 
set up a stack frame and save the data on the stack. Instead do:

	int action = (int)data;

> +	pte_t pte_val;
> +
> +	// invalidate the PTE so it's safe to modify
> +	pte_val = ptep_get_and_clear(&init_mm, addr, ptep);
> +	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

Why flush a range for a single page? On most targets this will do a 
tlbia, which is heavy, while a tlbie would suffice.

I think flush_tlb_kernel_range() should be replaced by something 
flushing only a single page.

> +
> +	// modify the PTE bits as desired, then apply
> +	switch (action) {
> +	case SET_MEMORY_RO:
> +		pte_val = pte_wrprotect(pte_val);
> +		break;
> +	case SET_MEMORY_RW:
> +		pte_val = pte_mkwrite(pte_val);
> +		break;
> +	case SET_MEMORY_NX:
> +		pte_val = pte_exprotect(pte_val);
> +		break;
> +	case SET_MEMORY_X:
> +		pte_val = pte_mkexec(pte_val);
> +		break;
> +	default:
> +		WARN_ON(true);
> +		return -EINVAL;

Is it worth checking that the action is valid for each page? I think the 
validity of the action should be checked in change_memory_attr(). All 
other functions are static, so you know they won't be called from outside.

Once done, you can squash __change_page_attr() into change_page_attr(), 
remove the ret var and return 0 all the time.
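
Something like this (completely untested) sketch, with the validity 
check moved up into change_memory_attr():

	static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
	{
		/* cast through long to avoid a 32/64-bit warning */
		int action = (int)(long)data;
		pte_t pte;

		spin_lock(&init_mm.page_table_lock);

		/* invalidate the PTE so it's safe to modify */
		pte = ptep_get_and_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

		/* modify the PTE bits as desired, then apply */
		/* action was already validated by change_memory_attr() */
		switch (action) {
		case SET_MEMORY_RO:
			pte = pte_wrprotect(pte);
			break;
		case SET_MEMORY_RW:
			pte = pte_mkwrite(pte);
			break;
		case SET_MEMORY_NX:
			pte = pte_exprotect(pte);
			break;
		case SET_MEMORY_X:
			pte = pte_mkexec(pte);
			break;
		}

		set_pte_at(&init_mm, addr, ptep, pte);

		spin_unlock(&init_mm.page_table_lock);

		return 0;
	}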

> +	}
> +
> +	set_pte_at(&init_mm, addr, ptep, pte_val);
> +
> +	return 0;
> +}
> +
> +static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
> +{
> +	int ret;
> +
> +	spin_lock(&init_mm.page_table_lock);
> +	ret = __change_page_attr(ptep, addr, data);
> +	spin_unlock(&init_mm.page_table_lock);
> +
> +	return ret;
> +}
> +
> +int change_memory_attr(unsigned long addr, int numpages, int action)
> +{
> +	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> +	unsigned long size = numpages * PAGE_SIZE;
> +
> +	if (!numpages)
> +		return 0;
> +
> +	return apply_to_page_range(&init_mm, start, size, change_page_attr, &action);

Use (void *)action instead of &action (see the comment above)

> +}
> 

Christophe
Christophe Leroy Jan. 20, 2020, 8:35 a.m. UTC | #2
On 24/12/2019 at 06:55, Russell Currey wrote:
> The set_memory_{ro/rw/nx/x}() functions are required for STRICT_MODULE_RWX,
> and are generally useful primitives to have.  This implementation is
> designed to be completely generic across powerpc's many MMUs.
> 
> It's possible that this could be optimised to be faster for specific
> MMUs, but the focus is on having a generic and safe implementation for
> now.
> 
> This implementation does not handle cases where the caller is attempting
> to change the mapping of the page it is executing from, or if another
> CPU is concurrently using the page being altered.  These cases likely
> shouldn't happen, but a more complex implementation with MMU-specific code
> could safely handle them, so that is left as a TODO for now.
> 
> Signed-off-by: Russell Currey <ruscur@russell.cc>
> ---
>   arch/powerpc/Kconfig                  |  1 +
>   arch/powerpc/include/asm/set_memory.h | 32 +++++++++++
>   arch/powerpc/mm/Makefile              |  1 +
>   arch/powerpc/mm/pageattr.c            | 83 +++++++++++++++++++++++++++
>   4 files changed, 117 insertions(+)
>   create mode 100644 arch/powerpc/include/asm/set_memory.h
>   create mode 100644 arch/powerpc/mm/pageattr.c
> 
> +static int __change_page_attr(pte_t *ptep, unsigned long addr, void *data)
> +{
> +	int action = *((int *)data);
> +	pte_t pte_val;

pte_val is really not a good name, because pte_val() is already a 
function that returns the value of a pte_t variable.

Here you should name it 'pte', as usual.

Christophe

> +
> +	// invalidate the PTE so it's safe to modify
> +	pte_val = ptep_get_and_clear(&init_mm, addr, ptep);
> +	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> +
> +	// modify the PTE bits as desired, then apply
> +	switch (action) {
> +	case SET_MEMORY_RO:
> +		pte_val = pte_wrprotect(pte_val);
> +		break;
> +	case SET_MEMORY_RW:
> +		pte_val = pte_mkwrite(pte_val);
> +		break;
> +	case SET_MEMORY_NX:
> +		pte_val = pte_exprotect(pte_val);
> +		break;
> +	case SET_MEMORY_X:
> +		pte_val = pte_mkexec(pte_val);
> +		break;
> +	default:
> +		WARN_ON(true);
> +		return -EINVAL;
> +	}
> +
> +	set_pte_at(&init_mm, addr, ptep, pte_val);
> +
> +	return 0;
> +}
> +
Russell Currey Feb. 3, 2020, 12:46 a.m. UTC | #3
On Wed, 2020-01-08 at 13:52 +0100, Christophe Leroy wrote:
> 
> On 24/12/2019 at 06:55, Russell Currey wrote:
> > The set_memory_{ro/rw/nx/x}() functions are required for
> > STRICT_MODULE_RWX,
> > and are generally useful primitives to have.  This implementation
> > is
> > designed to be completely generic across powerpc's many MMUs.
> > 
> > It's possible that this could be optimised to be faster for
> > specific
> > MMUs, but the focus is on having a generic and safe implementation
> > for
> > now.
> > 
> > This implementation does not handle cases where the caller is
> > attempting
> > to change the mapping of the page it is executing from, or if
> > another
> > CPU is concurrently using the page being altered.  These cases
> > likely
> > shouldn't happen, but a more complex implementation with MMU-
> > specific code
> > could safely handle them, so that is left as a TODO for now.
> > 
> > Signed-off-by: Russell Currey <ruscur@russell.cc>
> > ---
> >   arch/powerpc/Kconfig                  |  1 +
> >   arch/powerpc/include/asm/set_memory.h | 32 +++++++++++
> >   arch/powerpc/mm/Makefile              |  1 +
> >   arch/powerpc/mm/pageattr.c            | 83
> > +++++++++++++++++++++++++++
> >   4 files changed, 117 insertions(+)
> >   create mode 100644 arch/powerpc/include/asm/set_memory.h
> >   create mode 100644 arch/powerpc/mm/pageattr.c
> > 
> > diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> > index 1ec34e16ed65..f0b9b47b5353 100644
> > --- a/arch/powerpc/Kconfig
> > +++ b/arch/powerpc/Kconfig
> > @@ -133,6 +133,7 @@ config PPC
> >   	select ARCH_HAS_PTE_SPECIAL
> >   	select ARCH_HAS_MEMBARRIER_CALLBACKS
> >   	select ARCH_HAS_SCALED_CPUTIME		if
> > VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
> > +	select ARCH_HAS_SET_MEMORY
> >   	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 ||
> > PPC32) && !RELOCATABLE && !HIBERNATION)
> >   	select ARCH_HAS_TICK_BROADCAST		if
> > GENERIC_CLOCKEVENTS_BROADCAST
> >   	select ARCH_HAS_UACCESS_FLUSHCACHE
> > diff --git a/arch/powerpc/include/asm/set_memory.h
> > b/arch/powerpc/include/asm/set_memory.h
> > new file mode 100644
> > index 000000000000..5230ddb2fefd
> > --- /dev/null
> > +++ b/arch/powerpc/include/asm/set_memory.h
> > @@ -0,0 +1,32 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef _ASM_POWERPC_SET_MEMORY_H
> > +#define _ASM_POWERPC_SET_MEMORY_H
> > +
> > +#define SET_MEMORY_RO	1
> > +#define SET_MEMORY_RW	2
> > +#define SET_MEMORY_NX	3
> > +#define SET_MEMORY_X	4
> 
> Maybe going from 0 to 3 would be better than 1 to 4
> 
> > +
> > +int change_memory_attr(unsigned long addr, int numpages, int
> > action);
> 
> action could be unsigned.
> 
> > +
> > +static inline int set_memory_ro(unsigned long addr, int numpages)
> > +{
> > +	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
> > +}
> > +
> > +static inline int set_memory_rw(unsigned long addr, int numpages)
> > +{
> > +	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
> > +}
> > +
> > +static inline int set_memory_nx(unsigned long addr, int numpages)
> > +{
> > +	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
> > +}
> > +
> > +static inline int set_memory_x(unsigned long addr, int numpages)
> > +{
> > +	return change_memory_attr(addr, numpages, SET_MEMORY_X);
> > +}
> > +
> > +#endif
> > diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
> > index 5e147986400d..d0a0bcbc9289 100644
> > --- a/arch/powerpc/mm/Makefile
> > +++ b/arch/powerpc/mm/Makefile
> > @@ -20,3 +20,4 @@ obj-$(CONFIG_HIGHMEM)		+= highmem.o
> >   obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
> >   obj-$(CONFIG_PPC_PTDUMP)	+= ptdump/
> >   obj-$(CONFIG_KASAN)		+= kasan/
> > +obj-$(CONFIG_ARCH_HAS_SET_MEMORY) += pageattr.o
> 
> CONFIG_ARCH_HAS_SET_MEMORY is set unconditionally, so I think you
> should add pageattr.o to obj-y instead. CONFIG_ARCH_HAS_XXX options
> are almost never used in Makefiles.

Fair enough, will keep that in mind

> 
> > diff --git a/arch/powerpc/mm/pageattr.c
> > b/arch/powerpc/mm/pageattr.c
> > new file mode 100644
> > index 000000000000..15d5fb04f531
> > --- /dev/null
> > +++ b/arch/powerpc/mm/pageattr.c
> > @@ -0,0 +1,83 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +
> > +/*
> > + * MMU-generic set_memory implementation for powerpc
> > + *
> > + * Copyright 2019, IBM Corporation.
> > + */
> > +
> > +#include <linux/mm.h>
> > +#include <linux/set_memory.h>
> > +
> > +#include <asm/mmu.h>
> > +#include <asm/page.h>
> > +#include <asm/pgtable.h>
> > +
> > +
> > +/*
> > + * Updates the attributes of a page in three steps:
> > + *
> > + * 1. invalidate the page table entry
> > + * 2. flush the TLB
> > + * 3. install the new entry with the updated attributes
> > + *
> > + * This is unsafe if the caller is attempting to change the
> > mapping of the
> > + * page it is executing from, or if another CPU is concurrently
> > using the
> > + * page being altered.
> > + *
> > + * TODO make the implementation resistant to this.
> > + */
> > +static int __change_page_attr(pte_t *ptep, unsigned long addr,
> > void *data)
> > +{
> > +	int action = *((int *)data);
> 
> Don't use pointers for such simple things; pointers force the compiler
> to set up a stack frame and save the data on the stack. Instead do:
> 
> 	int action = (int)data;
> 
> > +	pte_t pte_val;
> > +
> > +	// invalidate the PTE so it's safe to modify
> > +	pte_val = ptep_get_and_clear(&init_mm, addr, ptep);
> > +	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> 
> Why flush a range for a single page? On most targets this will do a
> tlbia, which is heavy, while a tlbie would suffice.
> 
> I think flush_tlb_kernel_range() should be replaced by something
> flushing only a single page.

You might be able to help me out here; I wanted to do that, but the only
functions I could find that flushed single pages needed a
vm_area_struct, which I can't get.

> 
> > +
> > +	// modify the PTE bits as desired, then apply
> > +	switch (action) {
> > +	case SET_MEMORY_RO:
> > +		pte_val = pte_wrprotect(pte_val);
> > +		break;
> > +	case SET_MEMORY_RW:
> > +		pte_val = pte_mkwrite(pte_val);
> > +		break;
> > +	case SET_MEMORY_NX:
> > +		pte_val = pte_exprotect(pte_val);
> > +		break;
> > +	case SET_MEMORY_X:
> > +		pte_val = pte_mkexec(pte_val);
> > +		break;
> > +	default:
> > +		WARN_ON(true);
> > +		return -EINVAL;
> 
> Is it worth checking that the action is valid for each page? I think
> the validity of the action should be checked in change_memory_attr().
> All other functions are static, so you know they won't be called from
> outside.
> 
> Once done, you can squash __change_page_attr() into
> change_page_attr(), remove the ret var and return 0 all the time.

Makes sense to fold things into a single function, but in terms of
performance it shouldn't make a difference, right? I still have to
check the action to determine what to change (unless I replace passing
SET_MEMORY_RO into apply_to_page_range() with a function pointer to
pte_wrprotect(), for example).

> 
> > +	}
> > +
> > +	set_pte_at(&init_mm, addr, ptep, pte_val);
> > +
> > +	return 0;
> > +}
> > +
> > +static int change_page_attr(pte_t *ptep, unsigned long addr, void
> > *data)
> > +{
> > +	int ret;
> > +
> > +	spin_lock(&init_mm.page_table_lock);
> > +	ret = __change_page_attr(ptep, addr, data);
> > +	spin_unlock(&init_mm.page_table_lock);
> > +
> > +	return ret;
> > +}
> > +
> > +int change_memory_attr(unsigned long addr, int numpages, int
> > action)
> > +{
> > +	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
> > +	unsigned long size = numpages * PAGE_SIZE;
> > +
> > +	if (!numpages)
> > +		return 0;
> > +
> > +	return apply_to_page_range(&init_mm, start, size,
> > change_page_attr, &action);
> 
> Use (void *)action instead of &action (see the comment above)

To get this to work I had to use (void *)(size_t)action to stop the
compiler from complaining about casting an int to a void*. Is there a
better way to go about it? It works fine, it just looks gross.

> 
> > +}
> > 
> 
> Christophe
>
Christophe Leroy Feb. 3, 2020, 7:06 a.m. UTC | #4
On 03/02/2020 at 01:46, Russell Currey wrote:
> On Wed, 2020-01-08 at 13:52 +0100, Christophe Leroy wrote:
>>
>> On 24/12/2019 at 06:55, Russell Currey wrote:
>>> diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
>>> index 5e147986400d..d0a0bcbc9289 100644
>>> --- a/arch/powerpc/mm/Makefile
>>> +++ b/arch/powerpc/mm/Makefile
>>> @@ -20,3 +20,4 @@ obj-$(CONFIG_HIGHMEM)		+= highmem.o
>>>    obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
>>>    obj-$(CONFIG_PPC_PTDUMP)	+= ptdump/
>>>    obj-$(CONFIG_KASAN)		+= kasan/
>>> +obj-$(CONFIG_ARCH_HAS_SET_MEMORY) += pageattr.o
>>
>> CONFIG_ARCH_HAS_SET_MEMORY is set unconditionally, so I think you
>> should add pageattr.o to obj-y instead. CONFIG_ARCH_HAS_XXX options
>> are almost never used in Makefiles.
> 
> Fair enough, will keep that in mind

I forgot I commented that. I'll do it in v3.

>>> +	pte_t pte_val;
>>> +
>>> +	// invalidate the PTE so it's safe to modify
>>> +	pte_val = ptep_get_and_clear(&init_mm, addr, ptep);
>>> +	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
>>
>> Why flush a range for a single page? On most targets this will do a
>> tlbia, which is heavy, while a tlbie would suffice.
>>
>> I think flush_tlb_kernel_range() should be replaced by something
>> flushing only a single page.
> 
> You might be able to help me out here; I wanted to do that, but the only
> functions I could find that flushed single pages needed a
> vm_area_struct, which I can't get.

I sent out two patches for that, one for book3s/32 and one for nohash:
https://patchwork.ozlabs.org/patch/1231983/
https://patchwork.ozlabs.org/patch/1232223/

Maybe one for book3s/64 would be needed as well? Can you do it if needed?
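
With such a helper available, the call site would then look something 
like this (fragment only; the helper name is illustrative, not 
necessarily what the linked patches call it):

	/* invalidate the PTE so it's safe to modify */
	pte = ptep_get_and_clear(&init_mm, addr, ptep);
	/* flush just this page rather than the whole kernel TLB */
	flush_tlb_kernel_page(addr);	/* hypothetical single-page helper */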


> 
>>
>>> +
>>> +	// modify the PTE bits as desired, then apply
>>> +	switch (action) {
>>> +	case SET_MEMORY_RO:
>>> +		pte_val = pte_wrprotect(pte_val);
>>> +		break;
>>> +	case SET_MEMORY_RW:
>>> +		pte_val = pte_mkwrite(pte_val);
>>> +		break;
>>> +	case SET_MEMORY_NX:
>>> +		pte_val = pte_exprotect(pte_val);
>>> +		break;
>>> +	case SET_MEMORY_X:
>>> +		pte_val = pte_mkexec(pte_val);
>>> +		break;
>>> +	default:
>>> +		WARN_ON(true);
>>> +		return -EINVAL;
>>
>> Is it worth checking that the action is valid for each page? I think
>> the validity of the action should be checked in change_memory_attr().
>> All other functions are static, so you know they won't be called from
>> outside.
>>
>> Once done, you can squash __change_page_attr() into
>> change_page_attr(), remove the ret var and return 0 all the time.
> 
> Makes sense to fold things into a single function, but in terms of
> performance it shouldn't make a difference, right? I still have to
> check the action to determine what to change (unless I replace passing
> SET_MEMORY_RO into apply_to_page_range() with a function pointer to
> pte_wrprotect(), for example).

pte_wrprotect() is a static inline.
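
Taking its address would force the compiler to emit an out-of-line copy 
in each translation unit that does so, and the call through the pointer 
couldn't be inlined anyway. For illustration:

	/* hypothetical sketch: this defeats the static inline */
	pte_t (*modify)(pte_t) = pte_wrprotect;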

> 
>>
>>> +	}
>>> +
>>> +	set_pte_at(&init_mm, addr, ptep, pte_val);
>>> +
>>> +	return 0;
>>> +}
>>> +
>>> +static int change_page_attr(pte_t *ptep, unsigned long addr, void
>>> *data)
>>> +{
>>> +	int ret;
>>> +
>>> +	spin_lock(&init_mm.page_table_lock);
>>> +	ret = __change_page_attr(ptep, addr, data);
>>> +	spin_unlock(&init_mm.page_table_lock);
>>> +
>>> +	return ret;
>>> +}
>>> +
>>> +int change_memory_attr(unsigned long addr, int numpages, int
>>> action)
>>> +{
>>> +	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
>>> +	unsigned long size = numpages * PAGE_SIZE;
>>> +
>>> +	if (!numpages)
>>> +		return 0;
>>> +
>>> +	return apply_to_page_range(&init_mm, start, size,
>>> change_page_attr, &action);
>>
>> Use (void *)action instead of &action (see the comment above)
> 
> To get this to work I had to use (void *)(size_t)action to stop the
> compiler from complaining about casting an int to a void*. Is there a
> better way to go about it? It works fine, it just looks gross.

Yes, use long instead (see my v3)
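
i.e. cast through long, which is pointer-sized on both 32-bit and 64-bit 
powerpc, at both ends:

	apply_to_page_range(&init_mm, start, size, change_page_attr,
			    (void *)(long)action);

and in the callback:

	int action = (int)(long)data;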

Christophe

Patch

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1ec34e16ed65..f0b9b47b5353 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -133,6 +133,7 @@  config PPC
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
 	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
+	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..5230ddb2fefd
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,32 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO	1
+#define SET_MEMORY_RW	2
+#define SET_MEMORY_NX	3
+#define SET_MEMORY_X	4
+
+int change_memory_attr(unsigned long addr, int numpages, int action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+	return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+#endif
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5e147986400d..d0a0bcbc9289 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -20,3 +20,4 @@  obj-$(CONFIG_HIGHMEM)		+= highmem.o
 obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
 obj-$(CONFIG_PPC_PTDUMP)	+= ptdump/
 obj-$(CONFIG_KASAN)		+= kasan/
+obj-$(CONFIG_ARCH_HAS_SET_MEMORY) += pageattr.o
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..15d5fb04f531
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,83 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * Updates the attributes of a page in three steps:
+ *
+ * 1. invalidate the page table entry
+ * 2. flush the TLB
+ * 3. install the new entry with the updated attributes
+ *
+ * This is unsafe if the caller is attempting to change the mapping of the
+ * page it is executing from, or if another CPU is concurrently using the
+ * page being altered.
+ *
+ * TODO make the implementation resistant to this.
+ */
+static int __change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+	int action = *((int *)data);
+	pte_t pte_val;
+
+	// invalidate the PTE so it's safe to modify
+	pte_val = ptep_get_and_clear(&init_mm, addr, ptep);
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	// modify the PTE bits as desired, then apply
+	switch (action) {
+	case SET_MEMORY_RO:
+		pte_val = pte_wrprotect(pte_val);
+		break;
+	case SET_MEMORY_RW:
+		pte_val = pte_mkwrite(pte_val);
+		break;
+	case SET_MEMORY_NX:
+		pte_val = pte_exprotect(pte_val);
+		break;
+	case SET_MEMORY_X:
+		pte_val = pte_mkexec(pte_val);
+		break;
+	default:
+		WARN_ON(true);
+		return -EINVAL;
+	}
+
+	set_pte_at(&init_mm, addr, ptep, pte_val);
+
+	return 0;
+}
+
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+	int ret;
+
+	spin_lock(&init_mm.page_table_lock);
+	ret = __change_page_attr(ptep, addr, data);
+	spin_unlock(&init_mm.page_table_lock);
+
+	return ret;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, int action)
+{
+	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+	unsigned long size = numpages * PAGE_SIZE;
+
+	if (!numpages)
+		return 0;
+
+	return apply_to_page_range(&init_mm, start, size, change_page_attr, &action);
+}