[RFC,6/9] iov_iter: Add copy kunit tests for ITER_UBUF and ITER_IOVEC

Message ID 20230914221526.3153402-7-dhowells@redhat.com (mailing list archive)
State New, archived
Series iov_iter: kunit: Cleanup, abstraction and more tests

Commit Message

David Howells Sept. 14, 2023, 10:15 p.m. UTC
Add copy kunit tests for ITER_UBUF- and ITER_IOVEC-type iterators.  To give
the copies a userspace target, each test temporarily attaches a userspace VM,
with a file mapped into it, to the test thread.
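
Roughly, that attachment looks like the sketch below.  This is illustrative
only -- the real helper, iov_kunit_create_user_buf(), is added by an earlier
patch in this series and differs in detail:

	/* Sketch only; needs <linux/kthread.h>, <linux/mm.h>, <linux/shmem_fs.h>. */
	static u8 __user *attach_user_buf(size_t npages)
	{
		struct mm_struct *mm = mm_alloc();	/* fresh, empty user mm */
		struct file *file;
		unsigned long addr;

		if (!mm)
			return NULL;
		kthread_use_mm(mm);			/* borrow the mm on this kthread */

		file = shmem_file_setup("kunit-iov", npages * PAGE_SIZE, 0);
		if (IS_ERR(file))
			return NULL;

		addr = vm_mmap(file, 0, npages * PAGE_SIZE,
			       PROT_READ | PROT_WRITE, MAP_SHARED, 0);
		fput(file);
		return IS_ERR_VALUE(addr) ? NULL : (u8 __user *)addr;
	}

Teardown once the test completes would then be kthread_unuse_mm() plus mmput()
on the temporary mm.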

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Andrew Morton <akpm@linux-foundation.org>
cc: Christoph Hellwig <hch@lst.de>
cc: Christian Brauner <brauner@kernel.org>
cc: Jens Axboe <axboe@kernel.dk>
cc: Al Viro <viro@zeniv.linux.org.uk>
cc: Matthew Wilcox <willy@infradead.org>
cc: David Hildenbrand <david@redhat.com>
cc: John Hubbard <jhubbard@nvidia.com>
cc: Brendan Higgins <brendanhiggins@google.com>
cc: David Gow <davidgow@google.com>
cc: linux-mm@kvack.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-kselftest@vger.kernel.org
cc: kunit-dev@googlegroups.com
---
 lib/kunit_iov_iter.c | 200 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 200 insertions(+)

Patch

diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 3353bca9c40f..78f566ebd4a6 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -309,6 +309,202 @@ static u8 __user *__init iov_kunit_create_user_buf(struct kunit *test,
 	return buffer;
 }
 
+/*
+ * Test copying to an ITER_UBUF-type iterator.
+ */
+static void __init iov_kunit_copy_to_ubuf(struct kunit *test)
+{
+	const struct iov_kunit_range *pr;
+	struct iov_iter iter;
+	struct page **spages;
+	u8 __user *buffer;
+	u8 *scratch;
+	ssize_t uncleared;
+	size_t bufsize, npages, size, copied;
+	int i;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	for (i = 0; i < bufsize; i++)
+		scratch[i] = pattern(i);
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	uncleared = clear_user(buffer, bufsize);
+	KUNIT_EXPECT_EQ(test, uncleared, 0);
+	if (uncleared)
+		return;
+
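+	/* Copy the pattern to each test range with a fresh UBUF iterator. */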
+	i = 0;
+	for (pr = kvec_test_ranges; pr->page >= 0; pr++) {
+		size = pr->to - pr->from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		iov_iter_ubuf(&iter, ITER_DEST, buffer + pr->from, size);
+		copied = copy_to_iter(scratch + i, size, &iter);
+
+		KUNIT_EXPECT_EQ(test, copied, size);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+		if (test->status == KUNIT_FAILURE)
+			break;
+		i += size;
+	}
+
+	iov_kunit_build_to_reference_pattern(test, scratch, bufsize, kvec_test_ranges);
+	iov_kunit_check_user_pattern(test, buffer, scratch, bufsize);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_UBUF-type iterator.
+ */
+static void __init iov_kunit_copy_from_ubuf(struct kunit *test)
+{
+	const struct iov_kunit_range *pr;
+	struct iov_iter iter;
+	struct page **spages;
+	u8 __user *buffer;
+	u8 *scratch, *reference;
+	size_t bufsize, npages, size, copied;
+	int i;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	iov_kunit_fill_user_buf(test, buffer, bufsize);
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	memset(scratch, 0, bufsize);
+
+	reference = iov_kunit_create_buffer(test, &spages, npages);
+
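+	/* Read each test range back into the scratch buffer with a fresh UBUF iterator. */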
+	i = 0;
+	for (pr = kvec_test_ranges; pr->page >= 0; pr++) {
+		size = pr->to - pr->from;
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+		iov_iter_ubuf(&iter, ITER_SOURCE, buffer + pr->from, size);
+		copied = copy_from_iter(scratch + i, size, &iter);
+
+		KUNIT_EXPECT_EQ(test, copied, size);
+		KUNIT_EXPECT_EQ(test, iter.count, 0);
+		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+		if (test->status == KUNIT_FAILURE)
+			break;
+		i += size;
+	}
+
+	iov_kunit_build_from_reference_pattern(test, reference, bufsize, kvec_test_ranges);
+	iov_kunit_check_pattern(test, scratch, reference, bufsize);
+	KUNIT_SUCCEED();
+}
+
+static void __init iov_kunit_load_iovec(struct kunit *test,
+					struct iov_iter *iter, int dir,
+					struct iovec *iov, unsigned int iovmax,
+					u8 __user *buffer, size_t bufsize,
+					const struct iov_kunit_range *pr)
+{
+	size_t size = 0;
+	int i;
+
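+	/* Translate the test ranges into an iovec[] over the user buffer. */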
+	for (i = 0; i < iovmax; i++, pr++) {
+		if (pr->page < 0)
+			break;
+		KUNIT_ASSERT_GE(test, pr->to, pr->from);
+		KUNIT_ASSERT_LE(test, pr->to, bufsize);
+		iov[i].iov_base = buffer + pr->from;
+		iov[i].iov_len = pr->to - pr->from;
+		size += pr->to - pr->from;
+	}
+	KUNIT_ASSERT_LE(test, size, bufsize);
+
+	iov_iter_init(iter, dir, iov, i, size);
+}
+
+/*
+ * Test copying to an ITER_IOVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_iovec(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct page **spages;
+	struct iovec iov[8];
+	u8 __user *buffer;
+	u8 *scratch;
+	ssize_t uncleared;
+	size_t bufsize, npages, size, copied;
+	int i;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	for (i = 0; i < bufsize; i++)
+		scratch[i] = pattern(i);
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	uncleared = clear_user(buffer, bufsize);
+	KUNIT_EXPECT_EQ(test, uncleared, 0);
+	if (uncleared)
+		return;
+
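+	/* Build one IOVEC iterator over all the ranges and copy in a single call. */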
+	iov_kunit_load_iovec(test, &iter, ITER_DEST, iov, ARRAY_SIZE(iov),
+			     buffer, bufsize, kvec_test_ranges);
+	size = iter.count;
+
+	copied = copy_to_iter(scratch, size, &iter);
+
+	KUNIT_EXPECT_EQ(test, copied, size);
+	KUNIT_EXPECT_EQ(test, iter.count, 0);
+	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+	iov_kunit_build_to_reference_pattern(test, scratch, bufsize, kvec_test_ranges);
+	iov_kunit_check_user_pattern(test, buffer, scratch, bufsize);
+	KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_IOVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_iovec(struct kunit *test)
+{
+	struct iov_iter iter;
+	struct page **spages;
+	struct iovec iov[8];
+	u8 __user *buffer;
+	u8 *scratch, *reference;
+	size_t bufsize, npages, size, copied;
+
+	bufsize = 0x100000;
+	npages = bufsize / PAGE_SIZE;
+
+	buffer = iov_kunit_create_user_buf(test, npages, NULL);
+	iov_kunit_fill_user_buf(test, buffer, bufsize);
+
+	scratch = iov_kunit_create_buffer(test, &spages, npages);
+	memset(scratch, 0, bufsize);
+
+	reference = iov_kunit_create_buffer(test, &spages, npages);
+
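+	/* Build one IOVEC iterator over the ranges and copy back in one call. */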
+	iov_kunit_load_iovec(test, &iter, ITER_SOURCE, iov, ARRAY_SIZE(iov),
+			     buffer, bufsize, kvec_test_ranges);
+	size = iter.count;
+
+	copied = copy_from_iter(scratch, size, &iter);
+
+	KUNIT_EXPECT_EQ(test, copied, size);
+	KUNIT_EXPECT_EQ(test, iter.count, 0);
+	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+	iov_kunit_build_from_reference_pattern(test, reference, bufsize, kvec_test_ranges);
+	iov_kunit_check_pattern(test, reference, scratch, bufsize);
+	KUNIT_SUCCEED();
+}
+
 static void __init iov_kunit_load_kvec(struct kunit *test,
 				       struct iov_iter *iter, int dir,
 				       struct kvec *kvec, unsigned int kvmax,
@@ -884,6 +1080,10 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
 }
 
 static struct kunit_case __refdata iov_kunit_cases[] = {
+	KUNIT_CASE(iov_kunit_copy_to_ubuf),
+	KUNIT_CASE(iov_kunit_copy_from_ubuf),
+	KUNIT_CASE(iov_kunit_copy_to_iovec),
+	KUNIT_CASE(iov_kunit_copy_from_iovec),
 	KUNIT_CASE(iov_kunit_copy_to_kvec),
 	KUNIT_CASE(iov_kunit_copy_from_kvec),
 	KUNIT_CASE(iov_kunit_copy_to_bvec),