@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
+ u32 frag_off;
};
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -355,7 +355,7 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
spin_lock_bh(&vvs->rx_lock);
skb_queue_walk_safe(&vvs->rx_queue, skb, tmp) {
- off = 0;
+ off = VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
if (total == len)
break;
@@ -370,7 +370,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data + off, bytes);
+ err = skb_copy_datagram_iter(skb, off,
+ &msg->msg_iter,
+ bytes);
+
if (err)
goto out;
@@ -414,24 +417,28 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
skb = skb_peek(&vvs->rx_queue);
bytes = len - total;
- if (bytes > skb->len)
- bytes = skb->len;
+ if (bytes > skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off)
+ bytes = skb->len - VIRTIO_VSOCK_SKB_CB(skb)->frag_off;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data, bytes);
+ err = skb_copy_datagram_iter(skb,
+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off,
+ &msg->msg_iter, bytes);
+
if (err)
goto out;
spin_lock_bh(&vvs->rx_lock);
total += bytes;
- skb_pull(skb, bytes);
- if (skb->len == 0) {
+ VIRTIO_VSOCK_SKB_CB(skb)->frag_off += bytes;
+
+ if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->frag_off) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len);
@@ -503,7 +510,10 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
*/
spin_unlock_bh(&vvs->rx_lock);
- err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
+ err = skb_copy_datagram_iter(skb, 0,
+ &msg->msg_iter,
+ bytes_to_copy);
+
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.
This is a preparation patch for non-linear skbuff handling. It replaces direct calls to 'memcpy_to_msg()' with 'skb_copy_datagram_iter()'. The main advantage of the latter is that it can handle the paged part of the skb by using 'kmap()' on each page; if there are no pages in the skb, it behaves like a plain copy to the iov iterator. This patch also removes the 'skb_pull()' calls, because skb_pull() updates the 'data' pointer of the skb, which is the wrong thing to do with a non-linear skb. Instead of updating the 'data' and 'len' fields of the skb, a new field is added to the skb's control block: it holds the current offset at which to read the next data from the skb (whether the skb is linear or not). After each read this field is incremented, and once it reaches 'len' the skb is considered fully read.

Signed-off-by: Arseniy Krasnov <AVKrasnov@sberdevices.ru>
---
 include/linux/virtio_vsock.h            |  1 +
 net/vmw_vsock/virtio_transport_common.c | 26 +++++++++++++++++--------
 2 files changed, 19 insertions(+), 8 deletions(-)
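
For reference, a minimal userspace sketch of the bookkeeping this patch introduces. 'mock_skb', 'mock_cb' and 'mock_dequeue' are hypothetical stand-ins for illustration only; the real code operates on struct sk_buff, keeps the offset in the skb control block, and copies with skb_copy_datagram_iter():

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct mock_cb {
	bool reply;
	bool tap_delivered;
	uint32_t frag_off;	/* bytes of this skb already copied out */
};

struct mock_skb {
	struct mock_cb cb;
	const uint8_t *data;	/* linear payload; real skbs may also carry pages */
	size_t len;
};

/*
 * Copy up to 'want' bytes starting at cb.frag_off and advance the offset.
 * In the kernel this copy is skb_copy_datagram_iter(skb, frag_off, ...),
 * which also walks the paged fragments of a non-linear skb.
 */
static size_t mock_dequeue(struct mock_skb *skb, uint8_t *dst, size_t want)
{
	size_t avail = skb->len - skb->cb.frag_off;
	size_t bytes = want < avail ? want : avail;

	memcpy(dst, skb->data + skb->cb.frag_off, bytes);
	skb->cb.frag_off += bytes;
	return bytes;
}

/* The skb is fully consumed once the offset reaches its length. */
static bool mock_skb_done(const struct mock_skb *skb)
{
	return skb->cb.frag_off == skb->len;
}

int main(void)
{
	static const uint8_t payload[] = "hello, vsock";
	struct mock_skb skb = {
		.data = payload,
		.len = sizeof(payload) - 1,
	};
	uint8_t buf[16];
	size_t n;

	/* Two partial reads: the skb stays queued, only frag_off advances. */
	n = mock_dequeue(&skb, buf, 5);
	assert(n == 5 && !mock_skb_done(&skb));
	n = mock_dequeue(&skb, buf, sizeof(buf));
	assert(n == skb.len - 5 && mock_skb_done(&skb));
	(void)n;
	return 0;
}

The key point is that skb->data only covers the linear head of an skb, so advancing it with skb_pull() cannot account for data living in paged fragments; an explicit offset passed to skb_copy_datagram_iter() works for both linear and non-linear layouts.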