@@ -121,6 +121,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
+int copy_struct_from_iter(void *dst, size_t ksize, struct iov_iter *i,
+ size_t usize);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
@@ -948,6 +948,124 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);

+/**
+ * copy_struct_from_iter - copy a struct from an iov_iter
+ * @dst: Destination buffer.
+ * @ksize: Size of @dst struct.
+ * @i: Source iterator.
+ * @usize: (Alleged) size of struct in @i.
+ *
+ * Copies a struct from an iov_iter in a way that guarantees
+ * backwards-compatibility for struct arguments in an iovec (as long as the
+ * rules for copy_struct_from_user() are followed).
+ *
+ * The recommended usage is that @usize be taken from the current segment:
+ *
+ * int do_foo(struct iov_iter *i)
+ * {
+ * size_t usize = iov_iter_single_seg_count(i);
+ * struct foo karg;
+ * int err;
+ *
+ * if (usize > PAGE_SIZE)
+ * return -E2BIG;
+ * if (usize < FOO_SIZE_VER0)
+ * return -EINVAL;
+ * err = copy_struct_from_iter(&karg, sizeof(karg), i, usize);
+ * if (err)
+ * return err;
+ *
+ * // ...
+ * }
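+ *
+ * As with copy_struct_from_user(), the struct must only ever grow by
+ * appending new fields, with one size macro per version. A hypothetical
+ * layout for the example above might be:
+ *
+ * struct foo {
+ * __u64 a; // since FOO_SIZE_VER0
+ * __u64 b; // since FOO_SIZE_VER1
+ * };
+ * #define FOO_SIZE_VER0 8
+ * #define FOO_SIZE_VER1 16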
+ *
+ * Return: 0 on success; -E2BIG if @usize is larger than @ksize and the
+ * bytes past @ksize are not all zero; -EFAULT if the struct could not be
+ * copied (following the convention of copy_struct_from_user()).
+ *
+ * On success, the iterator is advanced @usize bytes. On error, the iterator is
+ * not advanced.
+ */
+int copy_struct_from_iter(void *dst, size_t ksize, struct iov_iter *i,
+ size_t usize)
+{
+ if (usize <= ksize) {
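+ /* The user struct fits in @dst: copy it and zero-fill the tail. */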
+ if (!copy_from_iter_full(dst, usize, i))
+ return -EFAULT;
+ memset(dst + usize, 0, ksize - usize);
+ } else {
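+ /*
+ * The user struct is larger than @dst: copy the first @ksize bytes
+ * and reject the call with -E2BIG if any trailing byte is non-zero.
+ */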
+ size_t copied = 0, copy;
+ int ret;
+
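+ /*
+ * ITER_PIPE is not supported here; also make sure the iterator
+ * really contains the @usize bytes that userspace claims.
+ */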
+ if (WARN_ON(iov_iter_is_pipe(i)) || unlikely(i->count < usize))
+ return -EFAULT;
+ if (iter_is_iovec(i))
+ might_fault();
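+ /*
+ * Walk each segment: fill @dst up to @ksize bytes, then verify that
+ * every remaining byte is zero. The three step blocks below handle
+ * iovec, bvec, and kvec segments respectively.
+ */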
+ iterate_all_kinds(i, usize, v, ({
+ copy = min(ksize - copied, v.iov_len);
+ if (copy && copyin(dst + copied, v.iov_base, copy))
+ return -EFAULT;
+ copied += copy;
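+ /*
+ * check_zeroed_user() returns 1 for an all-zero range, 0 if a
+ * non-zero byte is found, and a negative error on fault.
+ */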
+ ret = check_zeroed_user(v.iov_base + copy,
+ v.iov_len - copy);
+ if (ret <= 0)
+ return ret ?: -E2BIG;
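+ /* A step value of 0 tells the iterator the segment was consumed. */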
+ 0;}), ({
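+ /* bvec: map the page, copy, and scan the tail with memchr_inv(). */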
+ char *addr = kmap_atomic(v.bv_page);
+ copy = min_t(size_t, ksize - copied, v.bv_len);
+ memcpy(dst + copied, addr + v.bv_offset, copy);
+ copied += copy;
+ ret = memchr_inv(addr + v.bv_offset + copy, 0,
+ v.bv_len - copy) ? -E2BIG : 0;
+ kunmap_atomic(addr);
+ if (ret)
+ return ret;
+ }), ({
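+ /* kvec: kernel memory, directly addressable. */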
+ copy = min(ksize - copied, v.iov_len);
+ memcpy(dst + copied, v.iov_base, copy);
+ copied += copy;
+ if (memchr_inv(v.iov_base + copy, 0, v.iov_len - copy))
+ return -E2BIG;
+ })
+ )
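+ /* Everything checked out: consume the full alleged struct size. */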
+ iov_iter_advance(i, usize);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(copy_struct_from_iter);
+
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;