Message ID | 20191103211813.213227-9-jhubbard@nvidia.com (mailing list archive) |
---|---|
State | Superseded |
Series | mm/gup: track dma-pinned pages: FOLL_PIN, FOLL_LONGTERM |
On Sun, Nov 03, 2019 at 01:18:03PM -0800, John Hubbard wrote:
> Convert process_vm_access to use the new pin_user_pages_remote()
> call, which sets FOLL_PIN. Setting FOLL_PIN is now required for
> code that requires tracking of pinned pages.
> 
> Also, release the pages via put_user_page*().
> 
> Also, rename "pages" to "pinned_pages", as this makes for
> easier reading of process_vm_rw_single_vec().
> 
> Reviewed-by: Ira Weiny <ira.weiny@intel.com>
> Signed-off-by: John Hubbard <jhubbard@nvidia.com>

Reviewed-by: Jérôme Glisse <jglisse@redhat.com>

> ---
>  mm/process_vm_access.c | 28 +++++++++++++++-------------
>  1 file changed, 15 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
> index 357aa7bef6c0..fd20ab675b85 100644
> --- a/mm/process_vm_access.c
> +++ b/mm/process_vm_access.c
> @@ -42,12 +42,11 @@ static int process_vm_rw_pages(struct page **pages,
>  		if (copy > len)
>  			copy = len;
>  
> -		if (vm_write) {
> +		if (vm_write)
>  			copied = copy_page_from_iter(page, offset, copy, iter);
> -			set_page_dirty_lock(page);
> -		} else {
> +		else
>  			copied = copy_page_to_iter(page, offset, copy, iter);
> -		}
> +
>  		len -= copied;
>  		if (copied < copy && iov_iter_count(iter))
>  			return -EFAULT;
> @@ -96,7 +95,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
>  		flags |= FOLL_WRITE;
>  
>  	while (!rc && nr_pages && iov_iter_count(iter)) {
> -		int pages = min(nr_pages, max_pages_per_loop);
> +		int pinned_pages = min(nr_pages, max_pages_per_loop);
>  		int locked = 1;
>  		size_t bytes;
>  
> @@ -106,14 +105,15 @@ static int process_vm_rw_single_vec(unsigned long addr,
>  		 * current/current->mm
>  		 */
>  		down_read(&mm->mmap_sem);
> -		pages = get_user_pages_remote(task, mm, pa, pages, flags,
> -					      process_pages, NULL, &locked);
> +		pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
> +						     flags, process_pages,
> +						     NULL, &locked);
>  		if (locked)
>  			up_read(&mm->mmap_sem);
> -		if (pages <= 0)
> +		if (pinned_pages <= 0)
>  			return -EFAULT;
>  
> -		bytes = pages * PAGE_SIZE - start_offset;
> +		bytes = pinned_pages * PAGE_SIZE - start_offset;
>  		if (bytes > len)
>  			bytes = len;
>  
> @@ -122,10 +122,12 @@ static int process_vm_rw_single_vec(unsigned long addr,
>  					 vm_write);
>  		len -= bytes;
>  		start_offset = 0;
> -		nr_pages -= pages;
> -		pa += pages * PAGE_SIZE;
> -		while (pages)
> -			put_page(process_pages[--pages]);
> +		nr_pages -= pinned_pages;
> +		pa += pinned_pages * PAGE_SIZE;
> +
> +		/* If vm_write is set, the pages need to be made dirty: */
> +		put_user_pages_dirty_lock(process_pages, pinned_pages,
> +					  vm_write);
>  	}
>  
>  	return rc;
> -- 
> 2.23.0
> 
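For readers following the conversion, here is a minimal, illustrative C sketch (not part of the patch) of the pin/release pattern that process_vm_rw_single_vec() adopts above. The example_remote_copy() helper and EXAMPLE_MAX_BATCH constant are hypothetical scaffolding; the pin_user_pages_remote() and put_user_pages_dirty_lock() calls mirror the forms used in the diff, and mmap_sem is the pre-5.8 name of the mmap lock used throughout this series.

```c
/*
 * Illustrative sketch only -- not from the patch. It shows the
 * pin_user_pages_remote() / put_user_pages_dirty_lock() pairing that
 * replaces get_user_pages_remote() + per-page set_page_dirty_lock()
 * + put_page() in the conversion above.
 */
#include <linux/mm.h>
#include <linux/sched.h>

#define EXAMPLE_MAX_BATCH 16	/* hypothetical per-iteration page batch */

static int example_remote_copy(struct task_struct *task, struct mm_struct *mm,
			       unsigned long addr, bool vm_write)
{
	struct page *pages[EXAMPLE_MAX_BATCH];
	unsigned int gup_flags = vm_write ? FOLL_WRITE : 0;
	int locked = 1;
	long pinned;

	down_read(&mm->mmap_sem);
	/* FOLL_PIN is set internally by pin_user_pages_remote() */
	pinned = pin_user_pages_remote(task, mm, addr, EXAMPLE_MAX_BATCH,
				       gup_flags, pages, NULL, &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	if (pinned <= 0)
		return -EFAULT;

	/* ... copy data to or from the pinned pages here ... */

	/*
	 * Release the pins. When vm_write is true the pages are also
	 * marked dirty, which is why the per-page set_page_dirty_lock()
	 * call in process_vm_rw_pages() could be dropped.
	 */
	put_user_pages_dirty_lock(pages, pinned, vm_write);
	return 0;
}
```

Passing vm_write as the third argument keeps the dirty-marking conditional, matching the old behaviour where set_page_dirty_lock() was only called on the write path.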