From patchwork Fri Dec 10 10:13:46 2010
X-Patchwork-Submitter: Shirley Ma
X-Patchwork-Id: 398062
Subject: [RFC PATCH V2 5/5] Add TX zero copy in macvtap
From: Shirley Ma
To: Avi Kivity, Arnd Bergmann, mst@redhat.com
Cc: xiaohui.xin@intel.com, netdev@vger.kernel.org, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org
Date: Fri, 10 Dec 2010 02:13:46 -0800
Message-ID: <1291976026.2167.49.camel@localhost.localdomain>
X-Mailing-List: kvm@vger.kernel.org

 drivers/net/macvtap.c |  128 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 files changed, 116 insertions(+), 12 deletions(-)

diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 4256727..2ec9692 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -60,6 +60,7 @@ static struct proto macvtap_proto = {
  */
 static dev_t macvtap_major;
 #define MACVTAP_NUM_DEVS 65536
+#define GOODCOPY_LEN (L1_CACHE_BYTES < 128 ? 128 : L1_CACHE_BYTES)
 static struct class *macvtap_class;
 static struct cdev macvtap_cdev;
 
@@ -338,6 +339,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
 {
 	struct net *net = current->nsproxy->net_ns;
 	struct net_device *dev = dev_get_by_index(net, iminor(inode));
+	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct macvtap_queue *q;
 	int err;
 
@@ -367,6 +369,16 @@ static int macvtap_open(struct inode *inode, struct file *file)
 	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
 	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
 
+	/*
+	 * so far only VM uses macvtap, enable zero copy between guest
+	 * kernel and host kernel when lower device supports high memory
+	 * DMA
+	 */
+	if (vlan) {
+		if (vlan->lowerdev->features & NETIF_F_ZEROCOPY)
+			sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+	}
+
 	err = macvtap_set_queue(dev, file, q);
 	if (err)
 		sock_put(&q->sk);
@@ -431,6 +443,80 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
 	return skb;
 }
 
+/* set skb frags from iovec, this can move to core network code for reuse */
+static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+				  int offset, size_t count)
+{
+	int len = iov_length(from, count) - offset;
+	int copy = skb_headlen(skb);
+	int size, offset1 = 0;
+	int i = 0;
+	skb_frag_t *f;
+
+	/* Skip over from offset */
+	while (offset >= from->iov_len) {
+		offset -= from->iov_len;
+		++from;
+		--count;
+	}
+
+	/* copy up to skb headlen */
+	while (copy > 0) {
+		size = min_t(unsigned int, copy, from->iov_len - offset);
+		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
+				   size))
+			return -EFAULT;
+		if (copy > size) {
+			++from;
+			--count;
+		}
+		copy -= size;
+		offset1 += size;
+		offset = 0;
+	}
+
+	if (len == offset1)
+		return 0;
+
+	while (count--) {
+		struct page *page[MAX_SKB_FRAGS];
+		int num_pages;
+		unsigned long base;
+
+		len = from->iov_len - offset1;
+		if (!len) {
+			offset1 = 0;
+			++from;
+			continue;
+		}
+		base = (unsigned long)from->iov_base + offset1;
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+		if ((num_pages != size) ||
+		    (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
+			/* put_page is in skb free */
+			return -EFAULT;
+		while (len) {
+			f = &skb_shinfo(skb)->frags[i];
+			f->page = page[i];
+			f->page_offset = base & ~PAGE_MASK;
+			f->size = min_t(int, len, PAGE_SIZE - f->page_offset);
+			skb->data_len += f->size;
+			skb->len += f->size;
+			skb->truesize += f->size;
+			skb_shinfo(skb)->nr_frags++;
+			/* increase sk_wmem_alloc */
+			atomic_add(f->size, &skb->sk->sk_wmem_alloc);
+			base += f->size;
+			len -= f->size;
+			i++;
+		}
+		offset1 = 0;
+		++from;
+	}
+	return 0;
+}
+
 /*
  * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
  * be shared with the tun/tap driver.
@@ -514,17 +600,19 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 
 
 /* Get packet from user space buffer */
-static ssize_t macvtap_get_user(struct macvtap_queue *q,
-				const struct iovec *iv, size_t count,
-				int noblock)
+static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+				const struct iovec *iv, unsigned long total_len,
+				size_t count, int noblock)
 {
 	struct sk_buff *skb;
 	struct macvlan_dev *vlan;
-	size_t len = count;
+	unsigned long len = total_len;
 	int err;
 	struct virtio_net_hdr vnet_hdr = { 0 };
 	int vnet_hdr_len = 0;
+	int copylen, zerocopy;
+	zerocopy = sock_flag(&q->sk, SOCK_ZEROCOPY) && (len > GOODCOPY_LEN);
 
 	if (q->flags & IFF_VNET_HDR) {
 		vnet_hdr_len = q->vnet_hdr_sz;
@@ -550,12 +638,28 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
 	if (unlikely(len < ETH_HLEN))
 		goto err;
 
-	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
-				noblock, &err);
+	if (zerocopy)
+		copylen = vnet_hdr.hdr_len;
+	else
+		copylen = len;
+
+	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+				vnet_hdr.hdr_len, noblock, &err);
 	if (!skb)
 		goto err;
-
-	err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
+
+	if (zerocopy)
+		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+	else
+		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+						   len);
+	if (sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+		struct skb_ubuf_info *pend =
+			(struct skb_ubuf_info *)m->msg_control;
+
+		skb_shinfo(skb)->ubuf.callback = pend->callback;
+		skb_shinfo(skb)->ubuf.desc = pend->desc;
+	}
 	if (err)
 		goto err_kfree;
@@ -577,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
 		kfree_skb(skb);
 	rcu_read_unlock_bh();
 
-	return count;
+	return total_len;
 
 err_kfree:
 	kfree_skb(skb);
@@ -599,8 +703,8 @@ static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
 	ssize_t result = -ENOLINK;
 	struct macvtap_queue *q = file->private_data;
 
-	result = macvtap_get_user(q, iv, iov_length(iv, count),
-				  file->f_flags & O_NONBLOCK);
+	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
+				  file->f_flags & O_NONBLOCK);
 	return result;
 }
@@ -813,7 +917,7 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
 			   struct msghdr *m, size_t total_len)
 {
 	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
-	return macvtap_get_user(q, m->msg_iov, total_len,
+	return macvtap_get_user(q, m, m->msg_iov, total_len,
 			    m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
 }
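
For readers following the zero copy path, here is a minimal userspace sketch (not part of the patch) of the two decisions macvtap_get_user() and zerocopy_sg_from_iovec() make: whether a write is large enough to justify pinning user pages at all (the GOODCOPY_LEN test), and how many pages a pinned, possibly unaligned user buffer spans (the ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT expression). The PAGE_* macros, the assumed L1_CACHE_BYTES of 64 and the pages_spanned() helper are local stand-ins for the kernel definitions used above, chosen only for illustration.

/*
 * Illustrative userspace sketch; stand-in constants, not the kernel's.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define L1_CACHE_BYTES	64			/* assumed cache line size */
#define GOODCOPY_LEN	(L1_CACHE_BYTES < 128 ? 128 : L1_CACHE_BYTES)

/* same expression as zerocopy_sg_from_iovec(): pages covering [base, base + len) */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long small = 60;			/* below GOODCOPY_LEN: copied */
	unsigned long big = 65536;			/* above GOODCOPY_LEN: pinned */
	unsigned long base = 0x7f0000001800UL;		/* unaligned user address */

	printf("len %5lu -> %s\n", small,
	       small > GOODCOPY_LEN ? "zero copy" : "copy");
	printf("len %5lu -> %s, spans %lu page(s)\n", big,
	       big > GOODCOPY_LEN ? "zero copy" : "copy",
	       pages_spanned(base, big));	/* 17 pages for 64K starting at offset 0x800 */
	return 0;
}

The cache-line-based threshold reflects the trade-off the patch appears to be making: for a packet of one or two cache lines, copying is cheaper than pinning and later releasing user pages, so zero copy is only attempted above GOODCOPY_LEN, and then only when the lower device advertises NETIF_F_ZEROCOPY at open time.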