@@ -1500,8 +1500,9 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
return retval;
}

+#ifdef pte_index
/* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop.
+ * when inserting pages in a loop. Arch *must* define pte_index.
*/
static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1556,6 +1557,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
*num = remaining_pages_total;
return ret;
}
+#endif /* ifdef pte_index */

/**
* vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1575,6 +1577,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num)
{
+#ifdef pte_index
const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;

if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1586,6 +1589,18 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
}
/* Defer page refcount checking till we're about to map that page. */
return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
+#else
+ unsigned long idx = 0, pgcount = *num;
+ int err = -EINVAL;	/* returned as-is when pgcount is 0 */
+
+ for (; idx < pgcount; ++idx) {
+ err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+ if (err)
+ break;
+ }
+ *num = pgcount - idx;
+ return err;
+#endif /* ifdef pte_index */
}
EXPORT_SYMBOL(vm_insert_pages);
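
For readers wanting to see the batched API from the caller's side, here is a minimal, hypothetical usage sketch; it is not part of the patch. demo_mmap, demo_pages, and DEMO_NPAGES are invented names, and the pages are assumed to have been allocated elsewhere. On return from vm_insert_pages(), *num holds the count of pages that were not mapped.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>

#define DEMO_NPAGES 16UL

/* Assume each slot was filled earlier with an order-0 page,
 * e.g. from alloc_page(GFP_KERNEL). */
static struct page *demo_pages[DEMO_NPAGES];

/* Hypothetical ->mmap() handler for a character device. The VMA is
 * assumed to span at least DEMO_NPAGES pages; vm_insert_pages()
 * returns -EFAULT otherwise, per the bounds check above. */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long num = DEMO_NPAGES;
	int err;

	/* Where the arch defines pte_index, the whole batch is mapped
	 * with one page-table lock acquisition per pmd; otherwise the
	 * fallback above inserts the pages one at a time. */
	err = vm_insert_pages(vma, vma->vm_start, demo_pages, &num);
	if (err)
		pr_warn("demo: %lu of %lu pages left unmapped (err %d)\n",
			num, DEMO_NPAGES, err);
	return err;
}

The saving is the one the comment on insert_pages() describes: the page-table spinlock is taken once per batch rather than once per page, which matters for callers inserting many pages in a loop.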