| Message ID | 653369-95ef-acd2-d6ea-e95f5a997493@google.com (mailing list archive) |
|---|---|
| State | Not Applicable |
| Series | arch: allow pte_offset_map[_lock]() to fail |
On 6/8/23 21:18, Hugh Dickins wrote:
> To keep balance in future, remember to pte_unmap() after a successful
> get_ptep(). And act as if flush_cache_pages() really needs a map there,
> to read the pfn before "unmapping", to be sure page table is not removed.
>
> Signed-off-by: Hugh Dickins <hughd@google.com>

For the parisc parts:

Acked-by: Helge Deller <deller@gmx.de> # parisc

Helge

> ---
>  arch/parisc/kernel/cache.c | 26 +++++++++++++++++++++-----
>  1 file changed, 21 insertions(+), 5 deletions(-)
>
> diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
> index ca4a302d4365..501160250bb7 100644
> --- a/arch/parisc/kernel/cache.c
> +++ b/arch/parisc/kernel/cache.c
> @@ -426,10 +426,15 @@ void flush_dcache_page(struct page *page)
>  		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
>  		addr = mpnt->vm_start + offset;
>  		if (parisc_requires_coherency()) {
> +			bool needs_flush = false;
>  			pte_t *ptep;
>
>  			ptep = get_ptep(mpnt->vm_mm, addr);
> -			if (ptep && pte_needs_flush(*ptep))
> +			if (ptep) {
> +				needs_flush = pte_needs_flush(*ptep);
> +				pte_unmap(ptep);
> +			}
> +			if (needs_flush)
>  				flush_user_cache_page(mpnt, addr);
>  		} else {
>  			/*
> @@ -561,14 +566,20 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
>  static void flush_cache_page_if_present(struct vm_area_struct *vma,
>  	unsigned long vmaddr, unsigned long pfn)
>  {
> -	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
> +	bool needs_flush = false;
> +	pte_t *ptep;
>
>  	/*
>  	 * The pte check is racy and sometimes the flush will trigger
>  	 * a non-access TLB miss. Hopefully, the page has already been
>  	 * flushed.
>  	 */
> -	if (ptep && pte_needs_flush(*ptep))
> +	ptep = get_ptep(vma->vm_mm, vmaddr);
> +	if (ptep) {
> +		needs_flush = pte_needs_flush(*ptep);
> +		pte_unmap(ptep);
> +	}
> +	if (needs_flush)
>  		flush_cache_page(vma, vmaddr, pfn);
>  }
>
> @@ -635,17 +646,22 @@ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, u
>  	pte_t *ptep;
>
>  	for (addr = start; addr < end; addr += PAGE_SIZE) {
> +		bool needs_flush = false;
>  		/*
>  		 * The vma can contain pages that aren't present. Although
>  		 * the pte search is expensive, we need the pte to find the
>  		 * page pfn and to check whether the page should be flushed.
>  		 */
>  		ptep = get_ptep(vma->vm_mm, addr);
> -		if (ptep && pte_needs_flush(*ptep)) {
> +		if (ptep) {
> +			needs_flush = pte_needs_flush(*ptep);
> +			pfn = pte_pfn(*ptep);
> +			pte_unmap(ptep);
> +		}
> +		if (needs_flush) {
>  			if (parisc_requires_coherency()) {
>  				flush_user_cache_page(vma, addr);
>  			} else {
> -				pfn = pte_pfn(*ptep);
>  				if (WARN_ON(!pfn_valid(pfn)))
>  					return;
>  				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
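The rule the patch enforces is simple: copy whatever is needed out of the PTE while the mapping returned by get_ptep() is still live, call pte_unmap(), and only then act on the copies. A minimal sketch of that shape, for illustration only (it reuses the helpers visible in the diff: get_ptep(), pte_needs_flush(), pte_pfn(), __flush_cache_page(); the wrapper function name itself is hypothetical):

```c
/*
 * Sketch of the pattern the patch enforces, not a drop-in kernel function.
 * Everything needed from *ptep is captured in locals before pte_unmap(),
 * because once the PTE page is unmapped it may be freed underneath us.
 */
static void example_flush_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	bool needs_flush = false;
	unsigned long pfn = 0;
	pte_t *ptep;

	ptep = get_ptep(vma->vm_mm, addr);	/* may now fail: NULL means no PTE page */
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);	/* decide while still mapped */
		pfn = pte_pfn(*ptep);			/* read the pfn while still mapped */
		pte_unmap(ptep);			/* balance the successful map */
	}
	if (needs_flush && pfn_valid(pfn))
		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
```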
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ca4a302d4365..501160250bb7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -426,10 +426,15 @@ void flush_dcache_page(struct page *page)
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		addr = mpnt->vm_start + offset;
 		if (parisc_requires_coherency()) {
+			bool needs_flush = false;
 			pte_t *ptep;
 
 			ptep = get_ptep(mpnt->vm_mm, addr);
-			if (ptep && pte_needs_flush(*ptep))
+			if (ptep) {
+				needs_flush = pte_needs_flush(*ptep);
+				pte_unmap(ptep);
+			}
+			if (needs_flush)
 				flush_user_cache_page(mpnt, addr);
 		} else {
 			/*
@@ -561,14 +566,20 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
 	unsigned long vmaddr, unsigned long pfn)
 {
-	pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+	bool needs_flush = false;
+	pte_t *ptep;
 
 	/*
 	 * The pte check is racy and sometimes the flush will trigger
 	 * a non-access TLB miss. Hopefully, the page has already been
 	 * flushed.
 	 */
-	if (ptep && pte_needs_flush(*ptep))
+	ptep = get_ptep(vma->vm_mm, vmaddr);
+	if (ptep) {
+		needs_flush = pte_needs_flush(*ptep);
+		pte_unmap(ptep);
+	}
+	if (needs_flush)
 		flush_cache_page(vma, vmaddr, pfn);
 }
 
@@ -635,17 +646,22 @@ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, u
 	pte_t *ptep;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		bool needs_flush = false;
 		/*
 		 * The vma can contain pages that aren't present. Although
 		 * the pte search is expensive, we need the pte to find the
 		 * page pfn and to check whether the page should be flushed.
 		 */
 		ptep = get_ptep(vma->vm_mm, addr);
-		if (ptep && pte_needs_flush(*ptep)) {
+		if (ptep) {
+			needs_flush = pte_needs_flush(*ptep);
+			pfn = pte_pfn(*ptep);
+			pte_unmap(ptep);
+		}
+		if (needs_flush) {
 			if (parisc_requires_coherency()) {
 				flush_user_cache_page(vma, addr);
 			} else {
-				pfn = pte_pfn(*ptep);
 				if (WARN_ON(!pfn_valid(pfn)))
 					return;
 				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
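One detail of the last hunk is easy to miss: pfn = pte_pfn(*ptep) is hoisted up beside pte_needs_flush(), even though only the non-coherent branch uses it. Per the commit message, that is deliberate: *ptep must not be touched after pte_unmap(). An illustrative contrast (not code from the patch, variable names as in the diff):

```c
	/*
	 * Wrong ordering once get_ptep()/pte_unmap() bracket the access:
	 * the PTE page may already be gone by the time pfn is read.
	 */
	ptep = get_ptep(vma->vm_mm, addr);
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);
		pte_unmap(ptep);
	}
	if (needs_flush)
		pfn = pte_pfn(*ptep);	/* BAD: dereference after unmap */

	/* Correct ordering, as in the patch: snapshot the pfn first. */
	ptep = get_ptep(vma->vm_mm, addr);
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);
		pfn = pte_pfn(*ptep);	/* read before "unmapping" */
		pte_unmap(ptep);
	}
```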
To keep balance in future, remember to pte_unmap() after a successful
get_ptep(). And act as if flush_cache_pages() really needs a map there,
to read the pfn before "unmapping", to be sure page table is not removed.

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 arch/parisc/kernel/cache.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
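For context on why a successful get_ptep() now needs a matching pte_unmap() at all: with this series, the PTE level of the walk is presumably reached via pte_offset_map(), which can fail and which takes a per-page-table mapping/RCU reference that the caller must drop. A hedged sketch of that shape (an assumption about get_ptep(), not copied from the parisc tree; the name is hypothetical):

```c
/*
 * Assumed shape of get_ptep(): walk to the PMD, then pte_offset_map(),
 * which may return NULL if the PTE page is absent or being freed.
 * On success the caller owns the map and must pte_unmap() it.
 */
static pte_t *get_ptep_sketch(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);	/* may fail: caller checks for NULL */
}
```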