@@ -522,7 +522,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	if (*flags & FOLL_NOWAIT)
 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
 	if (*flags & FOLL_TRIED) {
-		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
+		/*
+		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
+		 * can co-exist
+		 */
 		fault_flags |= FAULT_FLAG_TRIED;
 	}
@@ -938,17 +941,23 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
 		pages += ret;
 		start += ret << PAGE_SHIFT;
+		lock_dropped = true;
+retry:
 		/*
 		 * Repeat on the address that fired VM_FAULT_RETRY
-		 * without FAULT_FLAG_ALLOW_RETRY but with
+		 * with both FAULT_FLAG_ALLOW_RETRY and
 		 * FAULT_FLAG_TRIED.
 		 */
 		*locked = 1;
-		lock_dropped = true;
 		down_read(&mm->mmap_sem);
 		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
-				       pages, NULL, NULL);
+				       pages, NULL, locked);
+		if (!*locked) {
+			/* Continue to retry until we succeeded */
+			BUG_ON(ret != 0);
+			goto retry;
+		}
 		if (ret != 1) {
 			BUG_ON(ret > 1);
 			if (!pages_done)
This is the gup counterpart of the change that allows VM_FAULT_RETRY to
happen more than once.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/gup.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
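
For readers who want to see the control flow in isolation, below is a
minimal userspace sketch (not kernel code) of the retry loop this patch
adds to __get_user_pages_locked().  fake_fault(), FAKE_FAULT_RETRY and
attempts_left are invented stand-ins for handle_mm_fault(),
VM_FAULT_RETRY and whatever keeps a fault retrying; the only point is
that the caller now loops whenever the lock is reported dropped,
re-taking it and retrying the same address, instead of forbidding a
second retry.

#include <stdio.h>
#include <stdbool.h>

#define FAKE_FAULT_OK     0
#define FAKE_FAULT_RETRY  1	/* analogue of VM_FAULT_RETRY */

static int attempts_left = 3;	/* pretend the page needs a few tries */

/* Stand-in for faulting one page; "drops the lock" when it retries. */
static int fake_fault(bool *locked)
{
	if (--attempts_left > 0) {
		*locked = false;	/* mmap_sem would be released here */
		return FAKE_FAULT_RETRY;
	}
	return FAKE_FAULT_OK;
}

int main(void)
{
	bool locked;
	int ret;

retry:
	/* Re-take the "lock" and retry the same address. */
	locked = true;
	ret = fake_fault(&locked);
	if (!locked) {
		/* Lock was dropped again: keep looping, as the patch does. */
		printf("fault returned RETRY, trying again\n");
		goto retry;
	}

	printf("fault completed, ret=%d\n", ret);
	return 0;
}

As in the patch, the loop keys off the lock having been dropped rather
than the return value; when the lock is dropped no pages are expected
to have been pinned, which is what the BUG_ON(ret != 0) in the real
code asserts.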