diff mbox series

[RFC,v1,2/8] vhost/vsock: rework packet allocation logic

Message ID 988e9e3c-7993-d6e2-626d-deb46248ed9f@sberdevices.ru (mailing list archive)
State RFC
Headers show
Series virtio/vsock: experimental zerocopy receive | expand

Checks

Context Check Description
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 5 this patch: 5
netdev/cc_maintainers success CCed 7 of 7 maintainers
netdev/build_clang fail Errors and warnings before: 6 this patch: 6
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 5 this patch: 5
netdev/checkpatch warning CHECK: Comparison to NULL could be written "!buf_page" CHECK: Comparison to NULL could be written "!mapped"
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/tree_selection success Guessing tree name failed - patch did not apply, async

Commit Message

Arseniy Krasnov May 12, 2022, 5:09 a.m. UTC
For packets received from the virtio RX queue, use the buddy
allocator instead of 'kmalloc()' so that the allocated pages
can later be inserted into a user-provided vma. The single call
to 'copy_from_iter()' is replaced with a per-page copy loop.

Signed-off-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
---
 drivers/vhost/vsock.c | 49 ++++++++++++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 37f0b4274113..157798985389 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -360,6 +360,9 @@  vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 	struct iov_iter iov_iter;
 	size_t nbytes;
 	size_t len;
+	struct page *buf_page;
+	ssize_t pkt_len;
+	int page_idx;
 
 	if (in != 0) {
 		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
@@ -393,20 +396,50 @@  vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
 		return NULL;
 	}
 
-	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
-	if (!pkt->buf) {
+	/* This creates memory overrun, as we allocate
+	 * at least one page for each packet.
+	 */
+	buf_page = alloc_pages(GFP_KERNEL, get_order(pkt->len));
+
+	if (buf_page == NULL) {
 		kfree(pkt);
 		return NULL;
 	}
 
+	pkt->buf = page_to_virt(buf_page);
 	pkt->buf_len = pkt->len;
 
-	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
-	if (nbytes != pkt->len) {
-		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
-		       pkt->len, nbytes);
-		virtio_transport_free_pkt(pkt);
-		return NULL;
+	page_idx = 0;
+	pkt_len = pkt->len;
+
+	/* As allocated pages are not mapped, process
+	 * pages one by one.
+	 */
+	while (pkt_len > 0) {
+		void *mapped;
+		size_t to_copy;
+
+		mapped = kmap(buf_page + page_idx);
+
+		if (mapped == NULL) {
+			virtio_transport_free_pkt(pkt);
+			return NULL;
+		}
+
+		to_copy = min(pkt_len, ((ssize_t)PAGE_SIZE));
+
+		nbytes = copy_from_iter(mapped, to_copy, &iov_iter);
+		if (nbytes != to_copy) {
+			vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
+			       to_copy, nbytes);
+			virtio_transport_free_pkt(pkt);
+			return NULL;
+		}
+
+		kunmap(mapped);
+
+		pkt_len -= to_copy;
+		page_idx++;
 	}
 
 	return pkt;