Message ID | 1565290555-14126-2-git-send-email-linux.bhar@gmail.com (mailing list archive)
---|---
State | New, archived
Series | get_user_pages changes
On 8/8/19 11:55 AM, Bharath Vedartham wrote:
...
> static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> 		    int write, int atomic, unsigned long *gpa, int *pageshift)
> {
> 	struct mm_struct *mm = gts->ts_mm;
> 	struct vm_area_struct *vma;
> 	unsigned long paddr;
> -	int ret, ps;
> +	int ret;
> +	struct page *page;
>
> 	vma = find_vma(mm, vaddr);
> 	if (!vma)
> @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>
> 	/*
> 	 * Atomic lookup is faster & usually works even if called in non-atomic
> -	 * context.
> +	 * context. get_user_pages_fast does atomic lookup before falling back to
> +	 * slow gup.
> 	 */
> 	rmb(); /* Must/check ms_range_active before loading PTEs */
> -	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> -	if (ret) {
> -		if (atomic)
> +	if (atomic) {
> +		ret = __get_user_pages_fast(vaddr, 1, write, &page);
> +		if (!ret)
> 			goto upm;
> -		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> +	} else {
> +		ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> +		if (!ret)
> 			goto inval;
> 	}
> +
> +	paddr = page_to_phys(page);
> +	put_user_page(page);
> +
> +	if (unlikely(is_vm_hugetlb_page(vma)))
> +		*pageshift = HPAGE_SHIFT;
> +	else
> +		*pageshift = PAGE_SHIFT;
> +
> 	if (is_gru_paddr(paddr))
> 		goto inval;
> -	paddr = paddr & ~((1UL << ps) - 1);
> +	paddr = paddr & ~((1UL << *pageshift) - 1);
> 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
> -	*pageshift = ps;

Why are you no longer setting *pageshift? There are a couple of callers
that both use this variable.

thanks,
On 8/8/19 4:21 PM, John Hubbard wrote:
> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> ...
>> 	if (is_gru_paddr(paddr))
>> 		goto inval;
>> -	paddr = paddr & ~((1UL << ps) - 1);
>> +	paddr = paddr & ~((1UL << *pageshift) - 1);
>> 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
>> -	*pageshift = ps;
>
> Why are you no longer setting *pageshift? There are a couple of callers
> that both use this variable.
>

...and once that's figured out, I can fix it up here and send it up with
the next misc callsites series. I'm also inclined to make the commit
log read more like this:

sgi-gru: Remove *pte_lookup functions, convert to put_user_page*()

For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().

This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").

As part of this conversion, the *pte_lookup functions can be removed and
easily replaced with get_user_pages_fast() functions. In the case of
atomic lookup, __get_user_pages_fast() is used, because it does not fall
back to the slow path: get_user_pages(). get_user_pages_fast(), on the other
hand, first calls __get_user_pages_fast(), but then falls back to the
slow path if __get_user_pages_fast() fails.

Also: remove unnecessary CONFIG_HUGETLB ifdefs.

thanks,
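[Editor's note: a minimal sketch of the conversion pattern the proposed
commit log describes, distilled out of the driver for readability. The
function name lookup_phys_addr() and the -EFAULT convention are
illustrative assumptions, not the driver's actual code; the gup calls
match the kernel API of this era (linux/mm.h, linux/mm_types.h).]

	/*
	 * Sketch: pin one user page with the gup-fast family, take the
	 * physical address, then drop the pin with put_user_page()
	 * (introduced in commit fc1d8e7cca2d) instead of put_page().
	 */
	static int lookup_phys_addr(unsigned long vaddr, int write,
				    int atomic, unsigned long *paddr)
	{
		struct page *page;
		int ret;

		if (atomic) {
			/* IRQ-safe variant: never falls back to slow gup. */
			ret = __get_user_pages_fast(vaddr, 1, write, &page);
		} else {
			/* May fall back to the slow path; can sleep. */
			ret = get_user_pages_fast(vaddr, 1,
						  write ? FOLL_WRITE : 0,
						  &page);
		}
		if (ret != 1)
			return -EFAULT;

		*paddr = page_to_phys(page);
		put_user_page(page);	/* pairs with the gup pin above */
		return 0;
	}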
On Thu, Aug 08, 2019 at 04:21:44PM -0700, John Hubbard wrote:
> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> ...
> > static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> > 		    int write, int atomic, unsigned long *gpa, int *pageshift)
> > {
> > 	struct mm_struct *mm = gts->ts_mm;
> > 	struct vm_area_struct *vma;
> > 	unsigned long paddr;
> > -	int ret, ps;
> > +	int ret;
> > +	struct page *page;
> >
> > 	vma = find_vma(mm, vaddr);
> > 	if (!vma)
> > @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> >
> > 	/*
> > 	 * Atomic lookup is faster & usually works even if called in non-atomic
> > -	 * context.
> > +	 * context. get_user_pages_fast does atomic lookup before falling back to
> > +	 * slow gup.
> > 	 */
> > 	rmb(); /* Must/check ms_range_active before loading PTEs */
> > -	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> > -	if (ret) {
> > -		if (atomic)
> > +	if (atomic) {
> > +		ret = __get_user_pages_fast(vaddr, 1, write, &page);
> > +		if (!ret)
> > 			goto upm;
> > -		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> > +	} else {
> > +		ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> > +		if (!ret)
> > 			goto inval;
> > 	}
> > +
> > +	paddr = page_to_phys(page);
> > +	put_user_page(page);
> > +
> > +	if (unlikely(is_vm_hugetlb_page(vma)))
> > +		*pageshift = HPAGE_SHIFT;
> > +	else
> > +		*pageshift = PAGE_SHIFT;
> > +
> > 	if (is_gru_paddr(paddr))
> > 		goto inval;
> > -	paddr = paddr & ~((1UL << ps) - 1);
> > +	paddr = paddr & ~((1UL << *pageshift) - 1);
> > 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
> > -	*pageshift = ps;
>
> Why are you no longer setting *pageshift? There are a couple of callers
> that both use this variable.
Hi John,

I did set *pageshift. The if statement above sets *pageshift. ps was
used to retrieve the pageshift value when the pte_lookup functions were
present; ps was passed by reference to those functions and set by them.
But here, since we are removing those functions, we don't need ps: we
directly set *pageshift to HPAGE_SHIFT or PAGE_SHIFT based on the type
of vma.

Hope this clears things up?

Thank you
Bharath
>
> thanks,
> --
> John Hubbard
> NVIDIA
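[Editor's note: the mask arithmetic that consumes *pageshift can be
checked in isolation. A self-contained sketch with illustrative values;
the helper name page_base() is hypothetical, not from the driver.]

	#include <stdio.h>

	/* Hypothetical helper: round an address down to its page base,
	 * exactly as paddr & ~((1UL << *pageshift) - 1) does above. */
	static unsigned long page_base(unsigned long paddr, int pageshift)
	{
		return paddr & ~((1UL << pageshift) - 1);
	}

	int main(void)
	{
		/* pageshift == 12 (4KB pages): mask off the low 0xfff. */
		printf("0x%lx\n", page_base(0x1234abcUL, 12)); /* 0x1234000 */
		/* pageshift == 21 (2MB huge pages): mask off 0x1fffff. */
		printf("0x%lx\n", page_base(0x1234abcUL, 21)); /* 0x1200000 */
		return 0;
	}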
On Thu, Aug 08, 2019 at 04:30:48PM -0700, John Hubbard wrote:
> On 8/8/19 4:21 PM, John Hubbard wrote:
> > On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> > ...
> >> 	if (is_gru_paddr(paddr))
> >> 		goto inval;
> >> -	paddr = paddr & ~((1UL << ps) - 1);
> >> +	paddr = paddr & ~((1UL << *pageshift) - 1);
> >> 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
> >> -	*pageshift = ps;
> >
> > Why are you no longer setting *pageshift? There are a couple of callers
> > that both use this variable.
> >
>
> ...and once that's figured out, I can fix it up here and send it up with
> the next misc callsites series. I'm also inclined to make the commit
> log read more like this:
>
> sgi-gru: Remove *pte_lookup functions, convert to put_user_page*()
>
> For pages that were retained via get_user_pages*(), release those pages
> via the new put_user_page*() routines, instead of via put_page() or
> release_pages().
>
> This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
> ("mm: introduce put_user_page*(), placeholder versions").
>
> As part of this conversion, the *pte_lookup functions can be removed and
> easily replaced with get_user_pages_fast() functions. In the case of
> atomic lookup, __get_user_pages_fast() is used, because it does not fall
> back to the slow path: get_user_pages(). get_user_pages_fast(), on the other
> hand, first calls __get_user_pages_fast(), but then falls back to the
> slow path if __get_user_pages_fast() fails.
>
> Also: remove unnecessary CONFIG_HUGETLB ifdefs.
Sounds great! I will send the next version with an updated changelog!

Thank you
Bharath
>
> thanks,
> --
> John Hubbard
> NVIDIA
On Thu, Aug 08, 2019 at 04:21:44PM -0700, John Hubbard wrote:
> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
> ...
> > static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> > 		    int write, int atomic, unsigned long *gpa, int *pageshift)
> > {
> > 	struct mm_struct *mm = gts->ts_mm;
> > 	struct vm_area_struct *vma;
> > 	unsigned long paddr;
> > -	int ret, ps;
> > +	int ret;
> > +	struct page *page;
> >
> > 	vma = find_vma(mm, vaddr);
> > 	if (!vma)
> > @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
> >
> > 	/*
> > 	 * Atomic lookup is faster & usually works even if called in non-atomic
> > -	 * context.
> > +	 * context. get_user_pages_fast does atomic lookup before falling back to
> > +	 * slow gup.
> > 	 */
> > 	rmb(); /* Must/check ms_range_active before loading PTEs */
> > -	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
> > -	if (ret) {
> > -		if (atomic)
> > +	if (atomic) {
> > +		ret = __get_user_pages_fast(vaddr, 1, write, &page);
> > +		if (!ret)
> > 			goto upm;
> > -		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
> > +	} else {
> > +		ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
> > +		if (!ret)
> > 			goto inval;
> > 	}
> > +
> > +	paddr = page_to_phys(page);
> > +	put_user_page(page);
> > +
> > +	if (unlikely(is_vm_hugetlb_page(vma)))
> > +		*pageshift = HPAGE_SHIFT;
> > +	else
> > +		*pageshift = PAGE_SHIFT;
> > +
> > 	if (is_gru_paddr(paddr))
> > 		goto inval;
> > -	paddr = paddr & ~((1UL << ps) - 1);
> > +	paddr = paddr & ~((1UL << *pageshift) - 1);
> > 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
> > -	*pageshift = ps;
>
> Why are you no longer setting *pageshift? There are a couple of callers
> that both use this variable.
I'll send v5 once you're convinced by my argument that ps is not needed,
and that *pageshift is in fact being set.

Thank you
Bharath
>
> thanks,
> --
> John Hubbard
> NVIDIA
On 8/9/19 2:44 AM, Bharath Vedartham wrote:
> On Thu, Aug 08, 2019 at 04:21:44PM -0700, John Hubbard wrote:
>> On 8/8/19 11:55 AM, Bharath Vedartham wrote:
>> ...
>>> static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>>> 		    int write, int atomic, unsigned long *gpa, int *pageshift)
>>> {
>>> 	struct mm_struct *mm = gts->ts_mm;
>>> 	struct vm_area_struct *vma;
>>> 	unsigned long paddr;
>>> -	int ret, ps;
>>> +	int ret;
>>> +	struct page *page;
>>>
>>> 	vma = find_vma(mm, vaddr);
>>> 	if (!vma)
>>> @@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
>>>
>>> 	/*
>>> 	 * Atomic lookup is faster & usually works even if called in non-atomic
>>> -	 * context.
>>> +	 * context. get_user_pages_fast does atomic lookup before falling back to
>>> +	 * slow gup.
>>> 	 */
>>> 	rmb(); /* Must/check ms_range_active before loading PTEs */
>>> -	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
>>> -	if (ret) {
>>> -		if (atomic)
>>> +	if (atomic) {
>>> +		ret = __get_user_pages_fast(vaddr, 1, write, &page);
>>> +		if (!ret)
>>> 			goto upm;
>>> -		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
>>> +	} else {
>>> +		ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
>>> +		if (!ret)
>>> 			goto inval;
>>> 	}
>>> +
>>> +	paddr = page_to_phys(page);
>>> +	put_user_page(page);
>>> +
>>> +	if (unlikely(is_vm_hugetlb_page(vma)))
>>> +		*pageshift = HPAGE_SHIFT;
>>> +	else
>>> +		*pageshift = PAGE_SHIFT;
>>> +
>>> 	if (is_gru_paddr(paddr))
>>> 		goto inval;
>>> -	paddr = paddr & ~((1UL << ps) - 1);
>>> +	paddr = paddr & ~((1UL << *pageshift) - 1);
>>> 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
>>> -	*pageshift = ps;
>>
>> Why are you no longer setting *pageshift? There are a couple of callers
>> that both use this variable.
> Hi John,
>
> I did set *pageshift. The if statement above sets *pageshift. ps was
> used to retrieve the pageshift value when the pte_lookup functions were
> present; ps was passed by reference to those functions and set by them.
> But here, since we are removing those functions, we don't need ps: we
> directly set *pageshift to HPAGE_SHIFT or PAGE_SHIFT based on the type
> of vma.
>
> Hope this clears things up?
>
Right you are, sorry for overlooking that. Looks good.

thanks,
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 4b713a8..304e9c5 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -166,96 +166,20 @@ static void get_clear_fault_map(struct gru_state *gru,
 }
 
 /*
- * Atomic (interrupt context) & non-atomic (user context) functions to
- * convert a vaddr into a physical address. The size of the page
- * is returned in pageshift.
- *	returns:
- *		  0 - successful
- *		< 0 - error code
- *		  1 - (atomic only) try again in non-atomic context
- */
-static int non_atomic_pte_lookup(struct vm_area_struct *vma,
-				 unsigned long vaddr, int write,
-				 unsigned long *paddr, int *pageshift)
-{
-	struct page *page;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
-#else
-	*pageshift = PAGE_SHIFT;
-#endif
-	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
-		return -EFAULT;
-	*paddr = page_to_phys(page);
-	put_page(page);
-	return 0;
-}
-
-/*
- * atomic_pte_lookup
+ * mmap_sem is already held on entry to this function. This guarantees
+ * existence of the page tables.
  *
- * Convert a user virtual address to a physical address
  * Only supports Intel large pages (2MB only) on x86_64.
- *	ZZZ - hugepage support is incomplete
- *
- * NOTE: mmap_sem is already held on entry to this function. This
- * guarantees existence of the page tables.
+ * ZZZ - hugepage support is incomplete.
  */
-static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
-	int write, unsigned long *paddr, int *pageshift)
-{
-	pgd_t *pgdp;
-	p4d_t *p4dp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t pte;
-
-	pgdp = pgd_offset(vma->vm_mm, vaddr);
-	if (unlikely(pgd_none(*pgdp)))
-		goto err;
-
-	p4dp = p4d_offset(pgdp, vaddr);
-	if (unlikely(p4d_none(*p4dp)))
-		goto err;
-
-	pudp = pud_offset(p4dp, vaddr);
-	if (unlikely(pud_none(*pudp)))
-		goto err;
-
-	pmdp = pmd_offset(pudp, vaddr);
-	if (unlikely(pmd_none(*pmdp)))
-		goto err;
-#ifdef CONFIG_X86_64
-	if (unlikely(pmd_large(*pmdp)))
-		pte = *(pte_t *) pmdp;
-	else
-#endif
-		pte = *pte_offset_kernel(pmdp, vaddr);
-
-	if (unlikely(!pte_present(pte) ||
-		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
-		return 1;
-
-	*paddr = pte_pfn(pte) << PAGE_SHIFT;
-#ifdef CONFIG_HUGETLB_PAGE
-	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
-#else
-	*pageshift = PAGE_SHIFT;
-#endif
-	return 0;
-
-err:
-	return 1;
-}
-
 static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
 		    int write, int atomic, unsigned long *gpa, int *pageshift)
 {
 	struct mm_struct *mm = gts->ts_mm;
 	struct vm_area_struct *vma;
 	unsigned long paddr;
-	int ret, ps;
+	int ret;
+	struct page *page;
 
 	vma = find_vma(mm, vaddr);
 	if (!vma)
@@ -263,21 +187,33 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
 
 	/*
 	 * Atomic lookup is faster & usually works even if called in non-atomic
-	 * context.
+	 * context. get_user_pages_fast does atomic lookup before falling back to
+	 * slow gup.
 	 */
 	rmb();	/* Must/check ms_range_active before loading PTEs */
-	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
-	if (ret) {
-		if (atomic)
+	if (atomic) {
+		ret = __get_user_pages_fast(vaddr, 1, write, &page);
+		if (!ret)
 			goto upm;
-		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
+	} else {
+		ret = get_user_pages_fast(vaddr, 1, write ? FOLL_WRITE : 0, &page);
+		if (!ret)
 			goto inval;
 	}
+
+	paddr = page_to_phys(page);
+	put_user_page(page);
+
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		*pageshift = HPAGE_SHIFT;
+	else
+		*pageshift = PAGE_SHIFT;
+
 	if (is_gru_paddr(paddr))
 		goto inval;
-	paddr = paddr & ~((1UL << ps) - 1);
+	paddr = paddr & ~((1UL << *pageshift) - 1);
 	*gpa = uv_soc_phys_ram_to_gpa(paddr);
-	*pageshift = ps;
+
 	return VTOP_SUCCESS;
 
 inval:
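[Editor's note: the goto upm path implies a caller-side retry protocol: in
atomic (interrupt) context, __get_user_pages_fast() may fail where the
sleeping path would succeed, so the lookup is retried later in a context
that can block. A sketch of that protocol; the assumption that gru_vtop()
signals this case with a VTOP_RETRY-style return code is illustrative, not
confirmed by the excerpt above.]

	/* First attempt from interrupt context: must not sleep. */
	ret = gru_vtop(gts, vaddr, write, 1 /* atomic */, &gpa, &pageshift);
	if (ret == VTOP_RETRY) {
		/*
		 * The atomic lookup could not complete without sleeping;
		 * retry where get_user_pages_fast() may take the slow path.
		 */
		ret = gru_vtop(gts, vaddr, write, 0, &gpa, &pageshift);
	}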