| Message ID | 20200228054714.204424-2-arjunroy.kdev@gmail.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | [v3,mm,1/2] mm: Define pte_index as macro for x86 |
On Thu, Feb 27, 2020 at 09:47:14PM -0800, Arjun Roy wrote:
> diff --git a/mm/memory.c b/mm/memory.c
> index d6f834f7d145..47b28fcc73c2 100644
> +++ b/mm/memory.c
> @@ -1460,18 +1460,6 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
>  	return 0;
>  }
>
> -static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
> -			unsigned long addr, struct page *page, pgprot_t prot)
> -{
> -	int err;
> -
> -	if (!page_count(page))
> -		return -EINVAL;
> -	err = validate_page_before_insert(page);
> -	return err ? err : insert_page_into_pte_locked(
> -		mm, pte_offset_map(pmd, addr), addr, page, prot);
> -}
> -
>  /*
>   * This is the old fallback for page remapping.
>   *
> @@ -1500,8 +1488,21 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
>  	return retval;
>  }
>
> +#ifdef pte_index

It seems a bit weird like this; don't we usually do this kind of stuff
with some CONFIG_ARCH_HAS_XX thing?

IMHO all arches should implement pte_index as the static inline; that
has been the general direction lately.

Jason
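[For readers comparing the two conventions Jason contrasts, a minimal sketch of each follows. `CONFIG_ARCH_HAS_PTE_INDEX` is a hypothetical symbol used only to illustrate the Kconfig-based idiom; it is not something this series adds.]

```c
/*
 * Idiom used by this patch: test whether the arch headers expose
 * pte_index as a preprocessor macro. This only works if every arch
 * that provides pte_index also #defines the name.
 */
#ifdef pte_index
/* ... batched implementation compiled in ... */
#endif

/*
 * Idiom Jason refers to: the arch selects a Kconfig symbol and generic
 * code tests that symbol. CONFIG_ARCH_HAS_PTE_INDEX is a hypothetical
 * name shown for illustration only.
 */
#ifdef CONFIG_ARCH_HAS_PTE_INDEX
/* ... batched implementation compiled in ... */
#endif
```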
On Fri, Feb 28, 2020 at 8:38 AM Jason Gunthorpe <jgg@ziepe.ca> wrote:
>
> On Thu, Feb 27, 2020 at 09:47:14PM -0800, Arjun Roy wrote:
> > diff --git a/mm/memory.c b/mm/memory.c
> > index d6f834f7d145..47b28fcc73c2 100644
> > +++ b/mm/memory.c
> > @@ -1460,18 +1460,6 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
> >  	return 0;
> >  }
> >
> > -static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
> > -			unsigned long addr, struct page *page, pgprot_t prot)
> > -{
> > -	int err;
> > -
> > -	if (!page_count(page))
> > -		return -EINVAL;
> > -	err = validate_page_before_insert(page);
> > -	return err ? err : insert_page_into_pte_locked(
> > -		mm, pte_offset_map(pmd, addr), addr, page, prot);
> > -}
> > -
> >  /*
> >   * This is the old fallback for page remapping.
> >   *
> > @@ -1500,8 +1488,21 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
> >  	return retval;
> >  }
> >
> > +#ifdef pte_index
>
> It seems a bit weird like this; don't we usually do this kind of stuff
> with some CONFIG_ARCH_HAS_XX thing?
>
> IMHO all arches should implement pte_index as the static inline; that
> has been the general direction lately.

Based on a comment from Stephen Rothwell, we found out that pte_index is
implemented as a "static inline" function only on tile and x86. That's
why Arjun opted for this method: it yields a smaller patch series to fix
the build breakage.

Thanks,
Soheil

> Jason
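[For context, the usual kernel pattern for making a static inline visible to `#ifdef` is a self-referential macro alias. Below is a minimal sketch of that pattern as it could apply to `pte_index`; the body shown is the conventional formula, and the actual definition in patch 1/2 of this series may differ in detail.]

```c
/*
 * Sketch only: keep pte_index as a static inline for type safety, and
 * add a self-referential #define so generic code can test
 * "#ifdef pte_index". PAGE_SHIFT and PTRS_PER_PTE come from the arch
 * page-table headers; the formula is the conventional one.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#define pte_index pte_index
```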
```diff
diff --git a/mm/memory.c b/mm/memory.c
index d6f834f7d145..47b28fcc73c2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1460,18 +1460,6 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
 	return 0;
 }
 
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, struct page *page, pgprot_t prot)
-{
-	int err;
-
-	if (!page_count(page))
-		return -EINVAL;
-	err = validate_page_before_insert(page);
-	return err ? err : insert_page_into_pte_locked(
-		mm, pte_offset_map(pmd, addr), addr, page, prot);
-}
-
 /*
  * This is the old fallback for page remapping.
  *
@@ -1500,8 +1488,21 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 }
 
+#ifdef pte_index
+static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+			unsigned long addr, struct page *page, pgprot_t prot)
+{
+	int err;
+
+	if (!page_count(page))
+		return -EINVAL;
+	err = validate_page_before_insert(page);
+	return err ? err : insert_page_into_pte_locked(
+		mm, pte_offset_map(pmd, addr), addr, page, prot);
+}
+
 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop.
+ * when inserting pages in a loop. Arch *must* define pte_index.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1556,6 +1557,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	*num = remaining_pages_total;
 	return ret;
 }
+#endif /* ifdef pte_index */
 
 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1575,6 +1577,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 		struct page **pages, unsigned long *num)
 {
+#ifdef pte_index
 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
 
 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1586,6 +1589,18 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	}
 	/* Defer page refcount checking till we're about to map that page. */
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
+#else
+	unsigned long idx = 0, pgcount = *num;
+	int err;
+
+	for (; idx < pgcount; ++idx) {
+		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+		if (err)
+			break;
+	}
+	*num = pgcount - idx;
+	return err;
+#endif /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
```
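[For reference, a hedged sketch of how a caller might use the batched API this patch exports. `map_pages_to_user()` and its arguments are hypothetical, shown only to illustrate the `*num` in/out contract visible in the diff above, where `*num` is updated to the count of pages not yet mapped.]

```c
#include <linux/mm.h>		/* vm_insert_pages(), struct vm_area_struct */
#include <linux/printk.h>	/* pr_warn() */

/*
 * Hypothetical caller: map nr already-allocated pages into a user VMA
 * with one call, so the PTE lock is taken once per PMD rather than
 * once per page.
 */
static int map_pages_to_user(struct vm_area_struct *vma,
			     struct page **pages, unsigned long nr)
{
	unsigned long num = nr;	/* in: page count; out: pages NOT mapped */
	int err;

	err = vm_insert_pages(vma, vma->vm_start, pages, &num);
	if (err)
		pr_warn("vm_insert_pages: %d (%lu of %lu pages not mapped)\n",
			err, num, nr);
	return err;
}
```

[On the fallback path, the same call degrades to a per-page `vm_insert_page()` loop, so callers see identical semantics whether or not the arch defines `pte_index`.]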