Message ID | 1637223483-2867-1-git-send-email-huangzhaoyang@gmail.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | [RFC] arch: arm64: try to use PTE_CONT when change page attr | expand |
forget the criteria for judging the linear address range, so please ignore this patch On Thu, Nov 18, 2021 at 4:18 PM Huangzhaoyang <huangzhaoyang@gmail.com> wrote: > > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com> > > kernel will use the min granularity when rodata_full enabled which > make TLB pressure high. Furthermore, there is no PTE_CONT applied. > Try to improve these a little by apply PTE_CONT when change page's > attr. > > Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com> > --- > arch/arm64/mm/pageattr.c | 62 ++++++++++++++++++++++++++++++++++++++++++++---- > 1 file changed, 58 insertions(+), 4 deletions(-) > > diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c > index a3bacd7..0b6a354 100644 > --- a/arch/arm64/mm/pageattr.c > +++ b/arch/arm64/mm/pageattr.c > @@ -61,8 +61,13 @@ static int change_memory_common(unsigned long addr, int numpages, > unsigned long start = addr; > unsigned long size = PAGE_SIZE * numpages; > unsigned long end = start + size; > + unsigned long cont_pte_start = 0; > + unsigned long cont_pte_end = 0; > + unsigned long cont_pmd_start = 0; > + unsigned long cont_pmd_end = 0; > + pgprot_t orig_set_mask = set_mask; > struct vm_struct *area; > - int i; > + int i = 0; > > if (!PAGE_ALIGNED(addr)) { > start &= PAGE_MASK; > @@ -98,9 +103,58 @@ static int change_memory_common(unsigned long addr, int numpages, > */ > if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || > pgprot_val(clear_mask) == PTE_RDONLY)) { > - for (i = 0; i < area->nr_pages; i++) { > - __change_memory_common((u64)page_address(area->pages[i]), > - PAGE_SIZE, set_mask, clear_mask); > + cont_pmd_start = (start + ~CONT_PMD_MASK + 1) & CONT_PMD_MASK; > + cont_pmd_end = cont_pmd_start + ~CONT_PMD_MASK + 1; > + cont_pte_start = (start + ~CONT_PTE_MASK + 1) & CONT_PTE_MASK; > + cont_pte_end = cont_pte_start + ~CONT_PTE_MASK + 1; > + > + if (addr <= cont_pmd_start && end > cont_pmd_end) { > + do { > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + i++; > + addr++; > + } while(addr < cont_pmd_start); > + do { > + set_mask = __pgprot(pgprot_val(set_mask) | PTE_CONT); > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + i++; > + addr++; > + } while(addr < cont_pmd_end); > + set_mask = orig_set_mask; > + do { > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + i++; > + addr++; > + } while(addr <= end); > + } else if (addr <= cont_pte_start && end > cont_pte_end) { > + do { > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + i++; > + addr++; > + } while(addr < cont_pte_start); > + do { > + set_mask = __pgprot(pgprot_val(set_mask) | PTE_CONT); > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + i++; > + addr++; > + } while(addr < cont_pte_end); > + set_mask = orig_set_mask; > + do { > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + i++; > + addr++; > + } while(addr <= end); > + } else { > + for (i = 0; i < area->nr_pages; i++) { > + __change_memory_common((u64)page_address(area->pages[i]), > + PAGE_SIZE, set_mask, clear_mask); > + } > } > } > > -- > 1.9.1 >
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index a3bacd7..0b6a354 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -61,8 +61,13 @@ static int change_memory_common(unsigned long addr, int numpages, unsigned long start = addr; unsigned long size = PAGE_SIZE * numpages; unsigned long end = start + size; + unsigned long cont_pte_start = 0; + unsigned long cont_pte_end = 0; + unsigned long cont_pmd_start = 0; + unsigned long cont_pmd_end = 0; + pgprot_t orig_set_mask = set_mask; struct vm_struct *area; - int i; + int i = 0; if (!PAGE_ALIGNED(addr)) { start &= PAGE_MASK; @@ -98,9 +103,58 @@ static int change_memory_common(unsigned long addr, int numpages, */ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || pgprot_val(clear_mask) == PTE_RDONLY)) { - for (i = 0; i < area->nr_pages; i++) { - __change_memory_common((u64)page_address(area->pages[i]), - PAGE_SIZE, set_mask, clear_mask); + cont_pmd_start = (start + ~CONT_PMD_MASK + 1) & CONT_PMD_MASK; + cont_pmd_end = cont_pmd_start + ~CONT_PMD_MASK + 1; + cont_pte_start = (start + ~CONT_PTE_MASK + 1) & CONT_PTE_MASK; + cont_pte_end = cont_pte_start + ~CONT_PTE_MASK + 1; + + if (addr <= cont_pmd_start && end > cont_pmd_end) { + do { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + i++; + addr++; + } while(addr < cont_pmd_start); + do { + set_mask = __pgprot(pgprot_val(set_mask) | PTE_CONT); + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + i++; + addr++; + } while(addr < cont_pmd_end); + set_mask = orig_set_mask; + do { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + i++; + addr++; + } while(addr <= end); + } else if (addr <= cont_pte_start && end > cont_pte_end) { + do { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + i++; + addr++; + } while(addr < cont_pte_start); + do { + set_mask = __pgprot(pgprot_val(set_mask) | PTE_CONT); + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + i++; + addr++; + } while(addr < cont_pte_end); + set_mask = orig_set_mask; + do { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + i++; + addr++; + } while(addr <= end); + } else { + for (i = 0; i < area->nr_pages; i++) { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + } } }