Message ID: 20210204013942.8398-1-huangpei@loongson.cn (mailing list archive)
State: New, archived
Series: MIPS: make userspace mapping young by default

Excerpts from Huang Pei's message of February 4, 2021 11:39 am:
> MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
> + 2 TLB Invalid), but the second TLB Invalid exception is just
> triggered by __update_tlb from do_page_fault writing the TLB without
> _PAGE_VALID set. With this patch, user space mapping prot is made
> young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
> and it only takes 1 TLB Miss + 1 TLB Invalid exception.
>
> Remove pte_sw_mkyoung without polluting MM code and make the page fault
> delay of MIPS on par with other architectures.
>
> Signed-off-by: Huang Pei <huangpei@loongson.cn>

Could we merge this? For the core code,

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

> [... remainder of the quoted patch snipped; the full diff appears at the end of this page ...]

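To make the exception counting above concrete, here is a small, self-contained userspace model of the software-refill sequence the commit message describes: a TLB miss is handled by copying the PTE into the TLB, an entry without the valid bit traps again as TLB Invalid into do_page_fault(), and __update_tlb() rewrites the TLB from the updated PTE. Everything below (the flag values, fault_stats, touch_once()) is invented for illustration; it is a sketch of the control flow, not kernel code.

/* toy model of a first touch of an anonymous page on a software-refill TLB */
#include <stdbool.h>
#include <stdio.h>

#define PRESENT (1u << 0)
#define VALID   (1u << 1)	/* the bit the hardware checks in a TLB entry */

struct fault_stats { int tlb_miss; int tlb_invalid; };

static unsigned int pte;	/* one page table entry */
static unsigned int tlb;	/* one TLB entry */
static bool tlb_loaded;

/* TLB refill handler: copy whatever the page table currently holds */
static void tlb_refill(struct fault_stats *s)
{
	s->tlb_miss++;
	tlb = pte;
	tlb_loaded = true;
}

/* do_page_fault(): allocate on the first fault, mark valid on the second;
 * writing the TLB here stands in for __update_tlb() */
static void do_page_fault(struct fault_stats *s, unsigned int default_prot)
{
	s->tlb_invalid++;
	if (!(pte & PRESENT))
		pte = PRESENT | default_prot;	/* mk_pte(page, vma->vm_page_prot) */
	else
		pte |= VALID;			/* young/valid fixup on the extra fault */
	tlb = pte;
}

static struct fault_stats touch_once(unsigned int default_prot)
{
	struct fault_stats s = {0, 0};

	pte = 0;
	tlb = 0;
	tlb_loaded = false;
	for (;;) {
		if (!tlb_loaded)		/* TLB Miss exception */
			tlb_refill(&s);
		else if (!(tlb & VALID))	/* TLB Invalid exception */
			do_page_fault(&s, default_prot);
		else
			return s;		/* access finally succeeds */
	}
}

int main(void)
{
	struct fault_stats before = touch_once(0);	/* old prot: VALID not preset */
	struct fault_stats after = touch_once(VALID);	/* patched prot: VALID preset */

	printf("before: %d TLB Miss + %d TLB Invalid\n", before.tlb_miss, before.tlb_invalid);
	printf("after:  %d TLB Miss + %d TLB Invalid\n", after.tlb_miss, after.tlb_invalid);
	return 0;
}

Run as is, this prints 1 TLB Miss + 2 TLB Invalid for the old default prot and 1 TLB Miss + 1 TLB Invalid once the valid bit is preset, matching the counts claimed in the patch description.
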
I am ok with it.

Original Message
From: Nicholas Piggin
Sent: Thursday, February 4, 2021, 11:55
To: ambrosehua@gmail.com; Huang Pei; Thomas Bogendoerfer
Cc: Andrew Morton; Huacai Chen; Gao Juxin; Jiaxun Yang; linux-arch@vger.kernel.org;
    linux-mips@vger.kernel.org; linux-mm@kvack.org; Li Xuefeng; Bibo Mao; Paul Burton;
    Yang Tiezhu; Fuxin Zhang
Subject: Re: [PATCH] MIPS: make userspace mapping young by default

Excerpts from Huang Pei's message of February 4, 2021 11:39 am:
> MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
> + 2 TLB Invalid), but the second TLB Invalid exception is just
> triggered by __update_tlb from do_page_fault writing the TLB without
> _PAGE_VALID set. With this patch, user space mapping prot is made
> young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
> and it only takes 1 TLB Miss + 1 TLB Invalid exception.
>
> Remove pte_sw_mkyoung without polluting MM code and make the page fault
> delay of MIPS on par with other architectures.
>
> Signed-off-by: Huang Pei <huangpei@loongson.cn>

Could we merge this? For the core code,

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>

> [... remainder of the quoted patch snipped ...]

On Thu, Feb 04, 2021 at 01:29:26PM +1000, Nicholas Piggin wrote:
> Excerpts from Huang Pei's message of February 4, 2021 11:39 am:
> > MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
> > + 2 TLB Invalid), but the second TLB Invalid exception is just
> > triggered by __update_tlb from do_page_fault writing the TLB without
> > _PAGE_VALID set. With this patch, user space mapping prot is made
> > young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
> > and it only takes 1 TLB Miss + 1 TLB Invalid exception.
> >
> > Remove pte_sw_mkyoung without polluting MM code and make the page fault
> > delay of MIPS on par with other architectures.
> >
> > Signed-off-by: Huang Pei <huangpei@loongson.cn>
>
> Could we merge this? For the core code,

sure, but IMHO I should only merge the MIPS part, correct ?

Thomas.

Excerpts from Thomas Bogendoerfer's message of February 4, 2021 8:34 pm:
> On Thu, Feb 04, 2021 at 01:29:26PM +1000, Nicholas Piggin wrote:
>> Excerpts from Huang Pei's message of February 4, 2021 11:39 am:
>> > MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
>> > + 2 TLB Invalid), but the second TLB Invalid exception is just
>> > triggered by __update_tlb from do_page_fault writing the TLB without
>> > _PAGE_VALID set. With this patch, user space mapping prot is made
>> > young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
>> > and it only takes 1 TLB Miss + 1 TLB Invalid exception.
>> >
>> > Remove pte_sw_mkyoung without polluting MM code and make the page fault
>> > delay of MIPS on par with other architectures.
>> >
>> > Signed-off-by: Huang Pei <huangpei@loongson.cn>
>>
>> Could we merge this? For the core code,
>
> sure, but IMHO I should only merge the MIPS part, correct ?

You could ack it for MIPS and Andrew could take it through his tree, which
would avoid dependencies.

Thanks,
Nick

On Thu, Feb 04, 2021 at 09:39:42AM +0800, Huang Pei wrote:
> MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
> + 2 TLB Invalid), but the second TLB Invalid exception is just
> triggered by __update_tlb from do_page_fault writing the TLB without
> _PAGE_VALID set. With this patch, user space mapping prot is made
> young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
> and it only takes 1 TLB Miss + 1 TLB Invalid exception.
>
> Remove pte_sw_mkyoung without polluting MM code and make the page fault
> delay of MIPS on par with other architectures.
>
> Signed-off-by: Huang Pei <huangpei@loongson.cn>
> ---
>  arch/mips/mm/cache.c    | 30 ++++++++++++++++--------------
>  include/linux/pgtable.h |  8 --------
>  mm/memory.c             |  3 ---
>  3 files changed, 16 insertions(+), 25 deletions(-)

Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>

Andrew, can you take this patch through your tree ?

Thomas.

> [... remainder of the quoted patch snipped; the full diff appears at the end of this page ...]

On Thu, 4 Feb 2021 16:22:39 +0100 Thomas Bogendoerfer <tsbogend@alpha.franken.de> wrote:

> On Thu, Feb 04, 2021 at 09:39:42AM +0800, Huang Pei wrote:
> > MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
> > + 2 TLB Invalid), but the second TLB Invalid exception is just
> > triggered by __update_tlb from do_page_fault writing the TLB without
> > _PAGE_VALID set. With this patch, user space mapping prot is made
> > young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
> > and it only takes 1 TLB Miss + 1 TLB Invalid exception.
> >
> > Remove pte_sw_mkyoung without polluting MM code and make the page fault
> > delay of MIPS on par with other architectures.
> >
> > Signed-off-by: Huang Pei <huangpei@loongson.cn>
> > ---
> >  arch/mips/mm/cache.c    | 30 ++++++++++++++++--------------
> >  include/linux/pgtable.h |  8 --------
> >  mm/memory.c             |  3 ---
> >  3 files changed, 16 insertions(+), 25 deletions(-)
>
> Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
>
> Andrew, can you take this patch through your tree ?

Sure. I'll drop Christophe's "mm/memory.c: remove pte_sw_mkyoung()"
(https://lkml.kernel.org/r/f302ef92c48d1f08a0459aaee1c568ca11213814.1612345700.git.christophe.leroy@csgroup.eu)
in favour of this one.

I changed this patch a bit due to other changes in -next. Please check
do_set_pte().


From: Huang Pei <huangpei@loongson.cn>
Subject: MIPS: make userspace mapping young by default

MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss +
2 TLB Invalid), but the second TLB Invalid exception is just triggered by
__update_tlb from do_page_fault writing the TLB without _PAGE_VALID set.
With this patch, user space mapping prot is made young by default (with
both _PAGE_VALID and _PAGE_YOUNG set), and it only takes 1 TLB Miss + 1
TLB Invalid exception.

Remove pte_sw_mkyoung without polluting MM code and make the page fault
delay of MIPS on par with other architectures.

Link: https://lkml.kernel.org/r/20210204013942.8398-1-huangpei@loongson.cn
Signed-off-by: Huang Pei <huangpei@loongson.cn>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: <huangpei@loongson.cn>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: <ambrosehua@gmail.com>
Cc: Bibo Mao <maobibo@loongson.cn>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: Paul Burton <paulburton@kernel.org>
Cc: Li Xuefeng <lixuefeng@loongson.cn>
Cc: Yang Tiezhu <yangtiezhu@loongson.cn>
Cc: Gao Juxin <gaojuxin@loongson.cn>
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Huacai Chen <chenhc@lemote.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 arch/mips/mm/cache.c    |   30 ++++++++++++++++--------------
 include/linux/pgtable.h |    8 --------
 mm/memory.c             |    4 ----
 3 files changed, 16 insertions(+), 26 deletions(-)

--- a/arch/mips/mm/cache.c~mips-make-userspace-mapping-young-by-default
+++ a/arch/mips/mm/cache.c
@@ -157,29 +157,31 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p) __pgprot(_page_cachable_default | (p))
+#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
 	protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[4] = PM(_PAGE_PRESENT);
-	protection_map[5] = PM(_PAGE_PRESENT);
-	protection_map[6] = PM(_PAGE_PRESENT);
-	protection_map[7] = PM(_PAGE_PRESENT);
+	protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+	protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[4] = PVA(_PAGE_PRESENT);
+	protection_map[5] = PVA(_PAGE_PRESENT);
+	protection_map[6] = PVA(_PAGE_PRESENT);
+	protection_map[7] = PVA(_PAGE_PRESENT);
 
 	protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+	protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 				_PAGE_NO_READ);
-	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-	protection_map[12] = PM(_PAGE_PRESENT);
-	protection_map[13] = PM(_PAGE_PRESENT);
-	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
-	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+	protection_map[12] = PVA(_PAGE_PRESENT);
+	protection_map[13] = PVA(_PAGE_PRESENT);
+	protection_map[14] = PVA(_PAGE_PRESENT);
+	protection_map[15] = PVA(_PAGE_PRESENT);
 }
 
+#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
--- a/include/linux/pgtable.h~mips-make-userspace-mapping-young-by-default
+++ a/include/linux/pgtable.h
@@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(st
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
-#ifndef pte_sw_mkyoung
-static inline pte_t pte_sw_mkyoung(pte_t pte)
-{
-	return pte;
-}
-#define pte_sw_mkyoung pte_sw_mkyoung
-#endif
-
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
--- a/mm/memory.c~mips-make-userspace-mapping-young-by-default
+++ a/mm/memory.c
@@ -2902,7 +2902,6 @@ static vm_fault_t wp_page_copy(struct vm
 	}
 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 	entry = mk_pte(new_page, vma->vm_page_prot);
-	entry = pte_sw_mkyoung(entry);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
 	/*
@@ -3560,7 +3559,6 @@ static vm_fault_t do_anonymous_page(stru
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
-	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3745,8 +3743,6 @@ void do_set_pte(struct vm_fault *vmf, st
 
 	if (prefault && arch_wants_old_prefaulted_pte())
 		entry = pte_mkold(entry);
-	else
-		entry = pte_sw_mkyoung(entry);
 
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);

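Since the rework above touches do_set_pte(), a short sketch of the resulting young/old decision may be useful: the accessed bit now comes in through vma->vm_page_prot (on MIPS, via the protection-map change), and fault-around pages can still be installed old on architectures that ask for it. This is a standalone model; pte_installed_young(), prot_has_accessed and wants_old_prefaulted are invented names, and only arch_wants_old_prefaulted_pte() and pte_mkold() in the comments refer to the real kernel helpers.

/* standalone sketch of the young/old decision in the reworked do_set_pte() */
#include <stdbool.h>
#include <stdio.h>

/*
 * prot_has_accessed models the MIPS protection-map change (vm_page_prot now
 * carries the valid/accessed bits); wants_old_prefaulted models
 * arch_wants_old_prefaulted_pte().  Both names are invented for this sketch.
 */
static bool pte_installed_young(bool prot_has_accessed, bool prefault,
				bool wants_old_prefaulted)
{
	bool young = prot_has_accessed;		/* mk_pte(page, vma->vm_page_prot) */

	if (prefault && wants_old_prefaulted)
		young = false;			/* entry = pte_mkold(entry) */
	/* no pte_sw_mkyoung() step remains */
	return young;
}

int main(void)
{
	/* the address that actually faulted, on MIPS after this patch */
	printf("faulting page:   %s\n",
	       pte_installed_young(true, false, false) ? "young" : "old");
	/* a fault-around page on an arch that prefers old prefaulted PTEs */
	printf("prefaulted page: %s\n",
	       pte_installed_young(true, true, true) ? "young" : "old");
	return 0;
}

The intent, as I read the hunk, is that only the page actually touched ends up young while fault-around pages can stay old where the architecture asks for that, so page aging still gets useful reference information.
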
On 06/02/2021 at 00:41, Andrew Morton wrote:
> On Thu, 4 Feb 2021 16:22:39 +0100 Thomas Bogendoerfer <tsbogend@alpha.franken.de> wrote:
>
>> On Thu, Feb 04, 2021 at 09:39:42AM +0800, Huang Pei wrote:
>>> MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
>>> + 2 TLB Invalid), but the second TLB Invalid exception is just
>>> triggered by __update_tlb from do_page_fault writing the TLB without
>>> _PAGE_VALID set. With this patch, user space mapping prot is made
>>> young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
>>> and it only takes 1 TLB Miss + 1 TLB Invalid exception.
>>>
>>> Remove pte_sw_mkyoung without polluting MM code and make the page fault
>>> delay of MIPS on par with other architectures.
>>>
>>> Signed-off-by: Huang Pei <huangpei@loongson.cn>
>>> ---
>>>  arch/mips/mm/cache.c    | 30 ++++++++++++++++--------------
>>>  include/linux/pgtable.h |  8 --------
>>>  mm/memory.c             |  3 ---
>>>  3 files changed, 16 insertions(+), 25 deletions(-)
>>
>> Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
>>
>> Andrew, can you take this patch through your tree ?
>
> Sure. I'll drop Christophe's "mm/memory.c: remove pte_sw_mkyoung()"
> (https://lkml.kernel.org/r/f302ef92c48d1f08a0459aaee1c568ca11213814.1612345700.git.christophe.leroy@csgroup.eu)
> in favour of this one.

Pity. My patch was improving page faults on powerpc/32. That one only
addresses MIPS.

Any plan to take the series from Nick
https://patchwork.kernel.org/project/linux-mm/list/?series=404539 ?

Christophe

On Mon, 8 Feb 2021 18:44:22 +0100 Christophe Leroy <christophe.leroy@csgroup.eu> wrote:

> [... nested quote of the patch and the earlier replies snipped ...]
>
> Pity. My patch was improving page faults on powerpc/32.

How does it do that? By running pte_mkyoung() for powerpc32? Such a
change is still valid, isn't it?

> That one only addresses MIPS.

It cleans up core code nicely, by removing a MIPS wart. We can still
add a ppc32 wart?

> Any plan to take the series from Nick
> https://patchwork.kernel.org/project/linux-mm/list/?series=404539 ?

I expect so. After -rc1, if the churn is settling down and reviewers
are happy enough.

diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 23b16bfd97b2..e19cf424bb39 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -156,29 +156,31 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p) __pgprot(_page_cachable_default | (p))
+#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
 	protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[4] = PM(_PAGE_PRESENT);
-	protection_map[5] = PM(_PAGE_PRESENT);
-	protection_map[6] = PM(_PAGE_PRESENT);
-	protection_map[7] = PM(_PAGE_PRESENT);
+	protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+	protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[4] = PVA(_PAGE_PRESENT);
+	protection_map[5] = PVA(_PAGE_PRESENT);
+	protection_map[6] = PVA(_PAGE_PRESENT);
+	protection_map[7] = PVA(_PAGE_PRESENT);
 
 	protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-	protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
-	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+	protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
 				_PAGE_NO_READ);
-	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-	protection_map[12] = PM(_PAGE_PRESENT);
-	protection_map[13] = PM(_PAGE_PRESENT);
-	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
-	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+	protection_map[12] = PVA(_PAGE_PRESENT);
+	protection_map[13] = PVA(_PAGE_PRESENT);
+	protection_map[14] = PVA(_PAGE_PRESENT);
+	protection_map[15] = PVA(_PAGE_PRESENT);
 }
 
+#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 8fcdfa52eb4b..8c042627399a 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
-#ifndef pte_sw_mkyoung
-static inline pte_t pte_sw_mkyoung(pte_t pte)
-{
-	return pte;
-}
-#define pte_sw_mkyoung pte_sw_mkyoung
-#endif
-
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index feff48e1465a..95718a623884 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2890,7 +2890,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	}
 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 	entry = mk_pte(new_page, vma->vm_page_prot);
-	entry = pte_sw_mkyoung(entry);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
 	/*
@@ -3548,7 +3547,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
-	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3824,7 +3822,6 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
 
 	flush_icache_page(vma, page);
 	entry = mk_pte(page, vma->vm_page_prot);
-	entry = pte_sw_mkyoung(entry);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	/* copy-on-write page */

MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss
+ 2 TLB Invalid), but the second TLB Invalid exception is just
triggered by __update_tlb from do_page_fault writing the TLB without
_PAGE_VALID set. With this patch, user space mapping prot is made
young by default (with both _PAGE_VALID and _PAGE_YOUNG set),
and it only takes 1 TLB Miss + 1 TLB Invalid exception.

Remove pte_sw_mkyoung without polluting MM code and make the page fault
delay of MIPS on par with other architectures.

Signed-off-by: Huang Pei <huangpei@loongson.cn>
---
 arch/mips/mm/cache.c    | 30 ++++++++++++++++--------------
 include/linux/pgtable.h |  8 --------
 mm/memory.c             |  3 ---
 3 files changed, 16 insertions(+), 25 deletions(-)
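The protection_map[] entries changed by this patch are consumed through vm_get_page_prot(), which indexes the table with a mapping's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits; the selected pgprot is then folded into every new PTE by mk_pte(). The sketch below models only that lookup: the bit values are arbitrary stand-ins and the table is simplified (the real MIPS entries also encode read/write/exec permissions and cacheability), so treat it as an illustration of the indexing scheme, not of the exact MIPS layout.

/* standalone model of the protection_map lookup (values are stand-ins) */
#include <stdio.h>

#define _PAGE_PRESENT  (1u << 0)
#define _PAGE_VALID    (1u << 1)
#define _PAGE_ACCESSED (1u << 2)

#define VM_READ   0x1u		/* modelled after the kernel's vm_flags bits */
#define VM_WRITE  0x2u
#define VM_EXEC   0x4u
#define VM_SHARED 0x8u

#define PM(p)  (p)					/* cacheability omitted */
#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))	/* as in the patch */

static unsigned int protection_map[16];

static void setup_protection_map(void)
{
	int i;

	/* indices 0 and 8 are PROT_NONE: still neither valid nor accessed */
	protection_map[0] = protection_map[8] = PM(_PAGE_PRESENT);

	/* every accessible combination is valid and accessed up front;
	 * the real table also sets per-index read/write/exec bits */
	for (i = 1; i < 16; i++)
		if (i != 8)
			protection_map[i] = PVA(_PAGE_PRESENT);
}

/* vm_get_page_prot(): the vm_flags bits are the table index */
static unsigned int get_page_prot(unsigned int vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
	unsigned int rw, none;

	setup_protection_map();
	rw = get_page_prot(VM_READ | VM_WRITE);		/* private read-write */
	none = get_page_prot(0);			/* PROT_NONE */

	printf("rw mapping: valid=%d accessed=%d\n",
	       !!(rw & _PAGE_VALID), !!(rw & _PAGE_ACCESSED));
	printf("PROT_NONE:  valid=%d accessed=%d\n",
	       !!(none & _PAGE_VALID), !!(none & _PAGE_ACCESSED));
	return 0;
}

Because every new PTE for an accessible mapping now starts out valid and accessed, the extra TLB Invalid round trip described above is avoided for ordinary read/write/exec mappings, while PROT_NONE mappings keep faulting as before.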