
[08/44] copy_page_{to,from}_iter(): switch iovec variants to generic

Message ID 20220622041552.737754-8-viro@zeniv.linux.org.uk (mailing list archive)
State New, archived
Series [01/44] 9p: handling Rerror without copy_from_iter_full()

Commit Message

Al Viro June 22, 2022, 4:15 a.m. UTC
we can do copyin/copyout under kmap_local_page(); it shouldn't overflow
the kmap stack - the maximal footprint increases by only one here.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 lib/iov_iter.c | 191 ++-----------------------------------------------
 1 file changed, 4 insertions(+), 187 deletions(-)
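
A minimal sketch of the generic path the iovec variants are switched to (it mirrors the __copy_page_to_iter() hunk in the patch below; the standalone helper name is made up for illustration). The single kmap_local_page() mapping is the one-slot kmap footprint increase the message refers to; copyout()/copyin() inside _copy_to_iter()/_copy_from_iter() are free to fault on the user buffer while that mapping is held.

/*
 * Sketch only - same shape as the __copy_page_to_iter() hunk in the
 * patch below; the helper name is hypothetical.  The page is mapped
 * once with kmap_local_page(), so the kmap_local stack grows by at
 * most one entry, and copyout() inside _copy_to_iter() may fault on
 * the user buffer while the mapping is held.
 */
static size_t generic_copy_page_to_iter(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	void *kaddr = kmap_local_page(page);
	size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);

	kunmap_local(kaddr);
	return wanted;
}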

Comments

Jeff Layton June 27, 2022, 6:31 p.m. UTC | #1
On Wed, 2022-06-22 at 05:15 +0100, Al Viro wrote:
> we can do copyin/copyout under kmap_local_page(); it shouldn't overflow
> the kmap stack - the maximal footprint increases by only one here.
> 
> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
> ---
>  lib/iov_iter.c | 191 ++-----------------------------------------------
>  1 file changed, 4 insertions(+), 187 deletions(-)
> 
> [ full diff snipped; see the patch below ]

Love it.

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Christian Brauner June 28, 2022, 12:32 p.m. UTC | #2
On Wed, Jun 22, 2022 at 05:15:16AM +0100, Al Viro wrote:
> we can do copyin/copyout under kmap_local_page(); it shouldn't overflow
> the kmap stack - the maximal footprint increases by only one here.
> 
> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
> ---

Assuming the WARN_ON(1) removals are intentional,
Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>

Al Viro June 28, 2022, 6:36 p.m. UTC | #3
On Tue, Jun 28, 2022 at 02:32:05PM +0200, Christian Brauner wrote:
> On Wed, Jun 22, 2022 at 05:15:16AM +0100, Al Viro wrote:
> > we can do copyin/copyout under kmap_local_page(); it shouldn't overflow
> > the kmap stack - the maximal footprint increases by only one here.
> > 
> > Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
> > ---
> 
> Assuming the WARN_ON(1) removals are intentional,
> Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>

Deliberate - it shouldn't be any different from what _copy_to_iter() and
_copy_from_iter() are ready to handle.
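
For illustration, a hypothetical (untested) caller handing copy_page_to_iter() a discard iterator after this patch; the function name and the PAGE_SIZE-sized request are made up, and the expected outcome follows Al's statement that _copy_to_iter() already copes with such iterators rather than anything verified here.

/* Hypothetical sketch, not part of the patch. */
static void discard_copy_example(struct page *page)
{
	struct iov_iter it;
	size_t copied;

	/* A discard iterator that can sink up to PAGE_SIZE bytes. */
	iov_iter_discard(&it, READ, PAGE_SIZE);

	/*
	 * With the per-type branches in __copy_page_to_iter() gone,
	 * this goes through kmap_local_page() + _copy_to_iter(); for
	 * ITER_DISCARD that is expected to just advance the count,
	 * matching the open-coded branch the patch removes.
	 */
	copied = copy_page_to_iter(page, 0, PAGE_SIZE, &it);

	/* Expected: copied == PAGE_SIZE, it.count == 0. */
	(void)copied;
}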

Patch

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 6dd5330f7a99..4c658a25e29c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -168,174 +168,6 @@  static int copyin(void *to, const void __user *from, size_t n)
 	return n;
 }
 
-static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *from;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	might_fault();
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		from = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = copyout(buf, from, copy);
-		copy -= left;
-		skip += copy;
-		from += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = copyout(buf, from, copy);
-			copy -= left;
-			skip = copy;
-			from += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = from - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-
-	kaddr = kmap(page);
-	from = kaddr + offset;
-	left = copyout(buf, from, copy);
-	copy -= left;
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = copyout(buf, from, copy);
-		copy -= left;
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-
-done:
-	if (skip == iov->iov_len) {
-		iov++;
-		skip = 0;
-	}
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-
-static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *to;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	might_fault();
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		to = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = copyin(to, buf, copy);
-		copy -= left;
-		skip += copy;
-		to += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = copyin(to, buf, copy);
-			copy -= left;
-			skip = copy;
-			to += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = to - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-
-	kaddr = kmap(page);
-	to = kaddr + offset;
-	left = copyin(to, buf, copy);
-	copy -= left;
-	skip += copy;
-	to += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = copyin(to, buf, copy);
-		copy -= left;
-		skip = copy;
-		to += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-
-done:
-	if (skip == iov->iov_len) {
-		iov++;
-		skip = 0;
-	}
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-
 #ifdef PIPE_PARANOIA
 static bool sanity(const struct iov_iter *i)
 {
@@ -848,24 +680,14 @@  static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
-	if (likely(iter_is_iovec(i)))
-		return copy_page_to_iter_iovec(page, offset, bytes, i);
-	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+	if (unlikely(iov_iter_is_pipe(i))) {
+		return copy_page_to_iter_pipe(page, offset, bytes, i);
+	} else {
 		void *kaddr = kmap_local_page(page);
 		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
 		kunmap_local(kaddr);
 		return wanted;
 	}
-	if (iov_iter_is_pipe(i))
-		return copy_page_to_iter_pipe(page, offset, bytes, i);
-	if (unlikely(iov_iter_is_discard(i))) {
-		if (unlikely(i->count < bytes))
-			bytes = i->count;
-		i->count -= bytes;
-		return bytes;
-	}
-	WARN_ON(1);
-	return 0;
 }
 
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
@@ -896,17 +718,12 @@  EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
-	if (unlikely(!page_copy_sane(page, offset, bytes)))
-		return 0;
-	if (likely(iter_is_iovec(i)))
-		return copy_page_from_iter_iovec(page, offset, bytes, i);
-	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+	if (page_copy_sane(page, offset, bytes)) {
 		void *kaddr = kmap_local_page(page);
 		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
 		kunmap_local(kaddr);
 		return wanted;
 	}
-	WARN_ON(1);
 	return 0;
 }
 EXPORT_SYMBOL(copy_page_from_iter);