@@ -193,9 +193,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
}
/*
- * atomic_pte_lookup
+ * atomic_pte_lookup() - Convert a user virtual address to a physical address
+ * Return: true for success, false for failure. Failure means that the page
+ * could not be pinned via gup fast.
*
- * Convert a user virtual address to a physical address
* Only supports Intel large pages (2MB only) on x86_64.
* ZZZ - hugepage support is incomplete
*
@@ -205,49 +207,20 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
int write, unsigned long *paddr, int *pageshift)
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t pte;
-
- pgdp = pgd_offset(vma->vm_mm, vaddr);
- if (unlikely(pgd_none(*pgdp)))
- goto err;
-
- p4dp = p4d_offset(pgdp, vaddr);
- if (unlikely(p4d_none(*p4dp)))
- goto err;
-
- pudp = pud_offset(p4dp, vaddr);
- if (unlikely(pud_none(*pudp)))
- goto err;
-
- pmdp = pmd_offset(pudp, vaddr);
- if (unlikely(pmd_none(*pmdp)))
- goto err;
-#ifdef CONFIG_X86_64
- if (unlikely(pmd_large(*pmdp)))
- pte = *(pte_t *) pmdp;
- else
-#endif
- pte = *pte_offset_kernel(pmdp, vaddr);
-
- if (unlikely(!pte_present(pte) ||
- (write && (!pte_write(pte) || !pte_dirty(pte)))))
- return 1;
-
- *paddr = pte_pfn(pte) << PAGE_SHIFT;
+	struct page *page;
+
if (unlikely(is_vm_hugetlb_page(vma)))
*pageshift = HPAGE_SHIFT;
else
*pageshift = PAGE_SHIFT;
- return 0;
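+
+	/*
+	 * __get_user_pages_fast() never sleeps, which this atomic lookup
+	 * path requires. It returns the number of pages pinned.
+	 */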
+ if (!__get_user_pages_fast(vaddr, 1, write, &page))
+ return false;
-err:
- return 1;
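+
+	/*
+	 * Only the physical address is needed; release the page reference
+	 * taken by gup right away.
+	 */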
+ *paddr = page_to_phys(page);
+ put_user_page(page);
+
+ return true;
}
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
@@ -256,7 +229,8 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
struct mm_struct *mm = gts->ts_mm;
struct vm_area_struct *vma;
unsigned long paddr;
- int ret, ps;
+ int ps;
+ bool success;
vma = find_vma(mm, vaddr);
if (!vma)
@@ -267,8 +241,8 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
* context.
*/
rmb(); /* Must/check ms_range_active before loading PTEs */
- ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
- if (ret) {
+ success = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
+ if (!success) {
if (atomic)
goto upm;
if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))