
[v7,7/7] powerpc/32: use set_memory_attr()

Message ID 20200331044825.591653-8-ruscur@russell.cc (mailing list archive)
State New, archived
Series set_memory() routines and STRICT_MODULE_RWX

Commit Message

Russell Currey March 31, 2020, 4:48 a.m. UTC
From: Christophe Leroy <christophe.leroy@c-s.fr>

Use set_memory_attr() instead of the PPC32 specific change_page_attr()

change_page_attr() was checking that the address was not mapped by
blocks and was handling highmem, but that's unneeded because the
affected pages can't be in highmem and block mapping verification
is already done by the callers.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/mm/pgtable_32.c | 95 ++++--------------------------------
 1 file changed, 10 insertions(+), 85 deletions(-)
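
For reference, set_memory_attr() is the generic helper introduced earlier in
this series; as the calls in the patch show, it takes a kernel virtual
address, a page count and a pgprot_t. The sketch below shows one way such a
helper can be built on top of apply_to_existing_page_range(). It is only an
illustration under that assumption (the set_page_attr() callback name is
hypothetical) and not necessarily the exact code added by the earlier patch.

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Rewrite one existing kernel PTE with the requested protection and
 * flush the TLB entry for that page.
 */
static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	pgprot_t prot = __pgprot((unsigned long)data);

	spin_lock(&init_mm.page_table_lock);
	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
	spin_unlock(&init_mm.page_table_lock);

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	/*
	 * apply_to_existing_page_range() only visits PTEs that are
	 * already present, so the helper needs none of the highmem or
	 * block-mapping checks the old change_page_attr() carried;
	 * the callers pass lowmem, page-mapped ranges only.
	 */
	return apply_to_existing_page_range(&init_mm, start, size,
					    set_page_attr,
					    (void *)pgprot_val(prot));
}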

Comments

Christophe Leroy March 31, 2020, 9:56 a.m. UTC | #1
On 31/03/2020 at 06:48, Russell Currey wrote:
> From: Christophe Leroy <christophe.leroy@c-s.fr>
> 
> Use set_memory_attr() instead of the PPC32 specific change_page_attr()
> 
> change_page_attr() was checking that the address was not mapped by
> blocks and was handling highmem, but that's unneeded because the
> affected pages can't be in highmem and block mapping verification
> is already done by the callers.
> 
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> ---
>   arch/powerpc/mm/pgtable_32.c | 95 ++++--------------------------------
>   1 file changed, 10 insertions(+), 85 deletions(-)
> 
> diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
> index 5fb90edd865e..3d92eaf3ee2f 100644
> --- a/arch/powerpc/mm/pgtable_32.c
> +++ b/arch/powerpc/mm/pgtable_32.c
> @@ -23,6 +23,7 @@
>   #include <linux/highmem.h>
>   #include <linux/memblock.h>
>   #include <linux/slab.h>
> +#include <linux/set_memory.h>
>   
>   #include <asm/pgtable.h>
>   #include <asm/pgalloc.h>
> @@ -121,99 +122,20 @@ void __init mapin_ram(void)
>   	}
>   }
>   
> -/* Scan the real Linux page tables and return a PTE pointer for
> - * a virtual address in a context.
> - * Returns true (1) if PTE was found, zero otherwise.  The pointer to
> - * the PTE pointer is unmodified if PTE is not found.
> - */
> -static int
> -get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)


This will conflict, get_pteptr() is gone now, see 
https://github.com/linuxppc/linux/commit/2efc7c085f05870eda6f29ac71eeb83f3bd54415

Christophe

Russell Currey April 1, 2020, 2:27 a.m. UTC | #2
On Tue, 2020-03-31 at 11:56 +0200, Christophe Leroy wrote:
> 
> On 31/03/2020 at 06:48, Russell Currey wrote:
> > From: Christophe Leroy <christophe.leroy@c-s.fr>
> > 
> > Use set_memory_attr() instead of the PPC32 specific
> > change_page_attr()
> > 
> > change_page_attr() was checking that the address was not mapped by
> > blocks and was handling highmem, but that's unneeded because the
> > affected pages can't be in highmem and block mapping verification
> > is already done by the callers.
> > 
> > Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> > ---
> >   arch/powerpc/mm/pgtable_32.c | 95 ++++---------------------------
> > -----
> >   1 file changed, 10 insertions(+), 85 deletions(-)
> > 
> > diff --git a/arch/powerpc/mm/pgtable_32.c
> > b/arch/powerpc/mm/pgtable_32.c
> > index 5fb90edd865e..3d92eaf3ee2f 100644
> > --- a/arch/powerpc/mm/pgtable_32.c
> > +++ b/arch/powerpc/mm/pgtable_32.c
> > @@ -23,6 +23,7 @@
> >   #include <linux/highmem.h>
> >   #include <linux/memblock.h>
> >   #include <linux/slab.h>
> > +#include <linux/set_memory.h>
> >   
> >   #include <asm/pgtable.h>
> >   #include <asm/pgalloc.h>
> > @@ -121,99 +122,20 @@ void __init mapin_ram(void)
> >   	}
> >   }
> >   
> > -/* Scan the real Linux page tables and return a PTE pointer for
> > - * a virtual address in a context.
> > - * Returns true (1) if PTE was found, zero otherwise.  The pointer
> > to
> > - * the PTE pointer is unmodified if PTE is not found.
> > - */
> > -static int
> > -get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
> > pmd_t **pmdp)
> 
> This will conflict, get_pteptr() is gone now, see 
> https://github.com/linuxppc/linux/commit/2efc7c085f05870eda6f29ac71eeb83f3bd54415
> 
> Christophe

OK cool, so I can just drop that hunk?  Will try that and make sure it
rebases on powerpc/next

- Russell

Christophe Leroy April 1, 2020, 5:50 a.m. UTC | #3
On 01/04/2020 at 04:27, Russell Currey wrote:
> On Tue, 2020-03-31 at 11:56 +0200, Christophe Leroy wrote:
>>
>> On 31/03/2020 at 06:48, Russell Currey wrote:
>>> From: Christophe Leroy <christophe.leroy@c-s.fr>
>>>
>>> Use set_memory_attr() instead of the PPC32 specific
>>> change_page_attr()
>>>
>>> change_page_attr() was checking that the address was not mapped by
>>> blocks and was handling highmem, but that's unneeded because the
>>> affected pages can't be in highmem and block mapping verification
>>> is already done by the callers.
>>>
>>> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
>>> ---
>>>    arch/powerpc/mm/pgtable_32.c | 95 ++++---------------------------
>>> -----
>>>    1 file changed, 10 insertions(+), 85 deletions(-)
>>>
>>> diff --git a/arch/powerpc/mm/pgtable_32.c
>>> b/arch/powerpc/mm/pgtable_32.c
>>> index 5fb90edd865e..3d92eaf3ee2f 100644
>>> --- a/arch/powerpc/mm/pgtable_32.c
>>> +++ b/arch/powerpc/mm/pgtable_32.c
>>> @@ -23,6 +23,7 @@
>>>    #include <linux/highmem.h>
>>>    #include <linux/memblock.h>
>>>    #include <linux/slab.h>
>>> +#include <linux/set_memory.h>
>>>    
>>>    #include <asm/pgtable.h>
>>>    #include <asm/pgalloc.h>
>>> @@ -121,99 +122,20 @@ void __init mapin_ram(void)
>>>    	}
>>>    }
>>>    
>>> -/* Scan the real Linux page tables and return a PTE pointer for
>>> - * a virtual address in a context.
>>> - * Returns true (1) if PTE was found, zero otherwise.  The pointer
>>> to
>>> - * the PTE pointer is unmodified if PTE is not found.
>>> - */
>>> -static int
>>> -get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
>>> pmd_t **pmdp)
>>
>> This will conflict, get_pteptr() is gone now, see
>> https://github.com/linuxppc/linux/commit/2efc7c085f05870eda6f29ac71eeb83f3bd54415
>>
>> Christophe
> 
> OK cool, so I can just drop that hunk?  Will try that and make sure it
> rebases on powerpc/next

Well, you still have to remove __change_page_attr_noflush() and 
change_page_attr(); everything else in this patch should still be the same.

Christophe


Patch

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5fb90edd865e..3d92eaf3ee2f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,6 +23,7 @@ 
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/set_memory.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -121,99 +122,20 @@  void __init mapin_ram(void)
 	}
 }
 
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise.  The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-static int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
-{
-        pgd_t	*pgd;
-	pud_t	*pud;
-        pmd_t	*pmd;
-        pte_t	*pte;
-        int     retval = 0;
-
-        pgd = pgd_offset(mm, addr & PAGE_MASK);
-        if (pgd) {
-		pud = pud_offset(pgd, addr & PAGE_MASK);
-		if (pud && pud_present(*pud)) {
-			pmd = pmd_offset(pud, addr & PAGE_MASK);
-			if (pmd_present(*pmd)) {
-				pte = pte_offset_map(pmd, addr & PAGE_MASK);
-				if (pte) {
-					retval = 1;
-					*ptep = pte;
-					if (pmdp)
-						*pmdp = pmd;
-					/* XXX caller needs to do pte_unmap, yuck */
-				}
-			}
-		}
-        }
-        return(retval);
-}
-
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
-	pte_t *kpte;
-	pmd_t *kpmd;
-	unsigned long address;
-
-	BUG_ON(PageHighMem(page));
-	address = (unsigned long)page_address(page);
-
-	if (v_block_mapped(address))
-		return 0;
-	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
-		return -EINVAL;
-	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-	pte_unmap(kpte);
-
-	return 0;
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
-	int i, err = 0;
-	unsigned long flags;
-	struct page *start = page;
-
-	local_irq_save(flags);
-	for (i = 0; i < numpages; i++, page++) {
-		err = __change_page_attr_noflush(page, prot);
-		if (err)
-			break;
-	}
-	wmb();
-	local_irq_restore(flags);
-	flush_tlb_kernel_range((unsigned long)page_address(start),
-			       (unsigned long)page_address(page));
-	return err;
-}
-
 void mark_initmem_nx(void)
 {
-	struct page *page = virt_to_page(_sinittext);
 	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
 				 PFN_DOWN((unsigned long)_sinittext);
 
 	if (v_block_mapped((unsigned long)_stext + 1))
 		mmu_mark_initmem_nx();
 	else
-		change_page_attr(page, numpages, PAGE_KERNEL);
+		set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mark_rodata_ro(void)
 {
-	struct page *page;
 	unsigned long numpages;
 
 	if (v_block_mapped((unsigned long)_sinittext)) {
@@ -222,20 +144,18 @@  void mark_rodata_ro(void)
 		return;
 	}
 
-	page = virt_to_page(_stext);
 	numpages = PFN_UP((unsigned long)_etext) -
 		   PFN_DOWN((unsigned long)_stext);
 
-	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
 	/*
 	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
 	 * to cover NOTES and EXCEPTION_TABLE.
 	 */
-	page = virt_to_page(__start_rodata);
 	numpages = PFN_UP((unsigned long)__init_begin) -
 		   PFN_DOWN((unsigned long)__start_rodata);
 
-	change_page_attr(page, numpages, PAGE_KERNEL_RO);
+	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
 
 	// mark_initmem_nx() should have already run by now
 	ptdump_check_wx();
@@ -245,9 +165,14 @@  void mark_rodata_ro(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	unsigned long addr = (unsigned long)page_address(page);
+
 	if (PageHighMem(page))
 		return;
 
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (enable)
+		set_memory_attr(addr, numpages, PAGE_KERNEL);
+	else
+		set_memory_attr(addr, numpages, __pgprot(0));
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */