Message ID | 1530596284-4101-2-git-send-email-jasowang@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Hi Jason,
I love your patch! Yet something to improve:
[auto build test ERROR on net-next/master]
url: https://github.com/0day-ci/linux/commits/Jason-Wang/Packed-virtqueue-for-vhost/20180703-154751
config: x86_64-randconfig-s0-07032254 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
# save the attached .config to linux build tree
make ARCH=x86_64
Note: the linux-review/Jason-Wang/Packed-virtqueue-for-vhost/20180703-154751 HEAD 01b902f1126212ea2597e6d09802bd9c4431bf82 builds fine.
It only hurts bisectability.
All errors (new ones prefixed by >>):
drivers//vhost/net.c: In function 'handle_rx':
>> drivers//vhost/net.c:738:15: error: implicit declaration of function 'get_rx_bufs' [-Werror=implicit-function-declaration]
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
^~~~~~~~~~~
cc1: some warnings being treated as errors
vim +/get_rx_bufs +738 drivers//vhost/net.c
030881372 Jason Wang 2016-03-04 687
3a4d5c94e Michael S. Tsirkin 2010-01-14 688 /* Expects to be always run from workqueue - which acts as
3a4d5c94e Michael S. Tsirkin 2010-01-14 689 * read-size critical section for our kind of RCU. */
94249369e Jason Wang 2011-01-17 690 static void handle_rx(struct vhost_net *net)
3a4d5c94e Michael S. Tsirkin 2010-01-14 691 {
81f95a558 Michael S. Tsirkin 2013-04-28 692 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
81f95a558 Michael S. Tsirkin 2013-04-28 693 struct vhost_virtqueue *vq = &nvq->vq;
8dd014adf David Stevens 2010-07-27 694 unsigned uninitialized_var(in), log;
8dd014adf David Stevens 2010-07-27 695 struct vhost_log *vq_log;
8dd014adf David Stevens 2010-07-27 696 struct msghdr msg = {
8dd014adf David Stevens 2010-07-27 697 .msg_name = NULL,
8dd014adf David Stevens 2010-07-27 698 .msg_namelen = 0,
8dd014adf David Stevens 2010-07-27 699 .msg_control = NULL, /* FIXME: get and handle RX aux data. */
8dd014adf David Stevens 2010-07-27 700 .msg_controllen = 0,
8dd014adf David Stevens 2010-07-27 701 .msg_flags = MSG_DONTWAIT,
8dd014adf David Stevens 2010-07-27 702 };
0960b6417 Jason Wang 2015-02-15 703 struct virtio_net_hdr hdr = {
0960b6417 Jason Wang 2015-02-15 704 .flags = 0,
0960b6417 Jason Wang 2015-02-15 705 .gso_type = VIRTIO_NET_HDR_GSO_NONE
8dd014adf David Stevens 2010-07-27 706 };
8dd014adf David Stevens 2010-07-27 707 size_t total_len = 0;
910a578f7 Michael S. Tsirkin 2012-10-24 708 int err, mergeable;
f5a4941aa Jason Wang 2018-05-29 709 s16 headcount;
8dd014adf David Stevens 2010-07-27 710 size_t vhost_hlen, sock_hlen;
8dd014adf David Stevens 2010-07-27 711 size_t vhost_len, sock_len;
2e26af79b Asias He 2013-05-07 712 struct socket *sock;
ba7438aed Al Viro 2014-12-10 713 struct iov_iter fixup;
0960b6417 Jason Wang 2015-02-15 714 __virtio16 num_buffers;
db688c24e Paolo Abeni 2018-04-24 715 int recv_pkts = 0;
8dd014adf David Stevens 2010-07-27 716
aaa3149bb Jason Wang 2018-03-26 717 mutex_lock_nested(&vq->mutex, 0);
2e26af79b Asias He 2013-05-07 718 sock = vq->private_data;
2e26af79b Asias He 2013-05-07 719 if (!sock)
2e26af79b Asias He 2013-05-07 720 goto out;
6b1e6cc78 Jason Wang 2016-06-23 721
6b1e6cc78 Jason Wang 2016-06-23 722 if (!vq_iotlb_prefetch(vq))
6b1e6cc78 Jason Wang 2016-06-23 723 goto out;
6b1e6cc78 Jason Wang 2016-06-23 724
8ea8cf89e Michael S. Tsirkin 2011-05-20 725 vhost_disable_notify(&net->dev, vq);
8241a1e46 Jason Wang 2016-06-01 726 vhost_net_disable_vq(net, vq);
2e26af79b Asias He 2013-05-07 727
81f95a558 Michael S. Tsirkin 2013-04-28 728 vhost_hlen = nvq->vhost_hlen;
81f95a558 Michael S. Tsirkin 2013-04-28 729 sock_hlen = nvq->sock_hlen;
8dd014adf David Stevens 2010-07-27 730
ea16c5143 Michael S. Tsirkin 2014-06-05 731 vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
8dd014adf David Stevens 2010-07-27 732 vq->log : NULL;
ea16c5143 Michael S. Tsirkin 2014-06-05 733 mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
8dd014adf David Stevens 2010-07-27 734
030881372 Jason Wang 2016-03-04 735 while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
8dd014adf David Stevens 2010-07-27 736 sock_len += sock_hlen;
8dd014adf David Stevens 2010-07-27 737 vhost_len = sock_len + vhost_hlen;
f5a4941aa Jason Wang 2018-05-29 @738 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
f5a4941aa Jason Wang 2018-05-29 739 vhost_len, &in, vq_log, &log,
94249369e Jason Wang 2011-01-17 740 likely(mergeable) ? UIO_MAXIOV : 1);
8dd014adf David Stevens 2010-07-27 741 /* On error, stop handling until the next kick. */
8dd014adf David Stevens 2010-07-27 742 if (unlikely(headcount < 0))
8241a1e46 Jason Wang 2016-06-01 743 goto out;
8dd014adf David Stevens 2010-07-27 744 /* OK, now we need to know about added descriptors. */
8dd014adf David Stevens 2010-07-27 745 if (!headcount) {
8ea8cf89e Michael S. Tsirkin 2011-05-20 746 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
8dd014adf David Stevens 2010-07-27 747 /* They have slipped one in as we were
8dd014adf David Stevens 2010-07-27 748 * doing that: check again. */
8ea8cf89e Michael S. Tsirkin 2011-05-20 749 vhost_disable_notify(&net->dev, vq);
8dd014adf David Stevens 2010-07-27 750 continue;
8dd014adf David Stevens 2010-07-27 751 }
8dd014adf David Stevens 2010-07-27 752 /* Nothing new? Wait for eventfd to tell us
8dd014adf David Stevens 2010-07-27 753 * they refilled. */
8241a1e46 Jason Wang 2016-06-01 754 goto out;
8dd014adf David Stevens 2010-07-27 755 }
5990a3051 Jason Wang 2018-01-04 756 if (nvq->rx_ring)
6e474083f Wei Xu 2017-12-01 757 msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
6e474083f Wei Xu 2017-12-01 758 /* On overrun, truncate and discard */
6e474083f Wei Xu 2017-12-01 759 if (unlikely(headcount > UIO_MAXIOV)) {
6e474083f Wei Xu 2017-12-01 760 iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
6e474083f Wei Xu 2017-12-01 761 err = sock->ops->recvmsg(sock, &msg,
6e474083f Wei Xu 2017-12-01 762 1, MSG_DONTWAIT | MSG_TRUNC);
6e474083f Wei Xu 2017-12-01 763 pr_debug("Discarded rx packet: len %zd\n", sock_len);
6e474083f Wei Xu 2017-12-01 764 continue;
6e474083f Wei Xu 2017-12-01 765 }
8dd014adf David Stevens 2010-07-27 766 /* We don't need to be notified again. */
ba7438aed Al Viro 2014-12-10 767 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
ba7438aed Al Viro 2014-12-10 768 fixup = msg.msg_iter;
ba7438aed Al Viro 2014-12-10 769 if (unlikely((vhost_hlen))) {
ba7438aed Al Viro 2014-12-10 770 /* We will supply the header ourselves
ba7438aed Al Viro 2014-12-10 771 * TODO: support TSO.
ba7438aed Al Viro 2014-12-10 772 */
ba7438aed Al Viro 2014-12-10 773 iov_iter_advance(&msg.msg_iter, vhost_hlen);
ba7438aed Al Viro 2014-12-10 774 }
1b7841404 Ying Xue 2015-03-02 775 err = sock->ops->recvmsg(sock, &msg,
8dd014adf David Stevens 2010-07-27 776 sock_len, MSG_DONTWAIT | MSG_TRUNC);
8dd014adf David Stevens 2010-07-27 777 /* Userspace might have consumed the packet meanwhile:
8dd014adf David Stevens 2010-07-27 778 * it's not supposed to do this usually, but might be hard
8dd014adf David Stevens 2010-07-27 779 * to prevent. Discard data we got (if any) and keep going. */
8dd014adf David Stevens 2010-07-27 780 if (unlikely(err != sock_len)) {
8dd014adf David Stevens 2010-07-27 781 pr_debug("Discarded rx packet: "
8dd014adf David Stevens 2010-07-27 782 " len %d, expected %zd\n", err, sock_len);
8dd014adf David Stevens 2010-07-27 783 vhost_discard_vq_desc(vq, headcount);
8dd014adf David Stevens 2010-07-27 784 continue;
8dd014adf David Stevens 2010-07-27 785 }
ba7438aed Al Viro 2014-12-10 786 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
4c5a84421 Michael S. Tsirkin 2015-02-25 787 if (unlikely(vhost_hlen)) {
4c5a84421 Michael S. Tsirkin 2015-02-25 788 if (copy_to_iter(&hdr, sizeof(hdr),
4c5a84421 Michael S. Tsirkin 2015-02-25 789 &fixup) != sizeof(hdr)) {
4c5a84421 Michael S. Tsirkin 2015-02-25 790 vq_err(vq, "Unable to write vnet_hdr "
4c5a84421 Michael S. Tsirkin 2015-02-25 791 "at addr %p\n", vq->iov->iov_base);
8241a1e46 Jason Wang 2016-06-01 792 goto out;
8dd014adf David Stevens 2010-07-27 793 }
4c5a84421 Michael S. Tsirkin 2015-02-25 794 } else {
4c5a84421 Michael S. Tsirkin 2015-02-25 795 /* Header came from socket; we'll need to patch
4c5a84421 Michael S. Tsirkin 2015-02-25 796 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
4c5a84421 Michael S. Tsirkin 2015-02-25 797 */
4c5a84421 Michael S. Tsirkin 2015-02-25 798 iov_iter_advance(&fixup, sizeof(hdr));
4c5a84421 Michael S. Tsirkin 2015-02-25 799 }
8dd014adf David Stevens 2010-07-27 800 /* TODO: Should check and handle checksum. */
5201aa49b Michael S. Tsirkin 2015-02-03 801
0960b6417 Jason Wang 2015-02-15 802 num_buffers = cpu_to_vhost16(vq, headcount);
cfbdab951 Jason Wang 2011-01-17 803 if (likely(mergeable) &&
0d79a493e Michael S. Tsirkin 2015-02-25 804 copy_to_iter(&num_buffers, sizeof num_buffers,
0d79a493e Michael S. Tsirkin 2015-02-25 805 &fixup) != sizeof num_buffers) {
8dd014adf David Stevens 2010-07-27 806 vq_err(vq, "Failed num_buffers write");
8dd014adf David Stevens 2010-07-27 807 vhost_discard_vq_desc(vq, headcount);
8241a1e46 Jason Wang 2016-06-01 808 goto out;
8dd014adf David Stevens 2010-07-27 809 }
f5a4941aa Jason Wang 2018-05-29 810 nvq->done_idx += headcount;
f5a4941aa Jason Wang 2018-05-29 811 if (nvq->done_idx > VHOST_RX_BATCH)
f5a4941aa Jason Wang 2018-05-29 812 vhost_rx_signal_used(nvq);
8dd014adf David Stevens 2010-07-27 813 if (unlikely(vq_log))
8dd014adf David Stevens 2010-07-27 814 vhost_log_write(vq, vq_log, log, vhost_len);
8dd014adf David Stevens 2010-07-27 815 total_len += vhost_len;
db688c24e Paolo Abeni 2018-04-24 816 if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
db688c24e Paolo Abeni 2018-04-24 817 unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) {
8dd014adf David Stevens 2010-07-27 818 vhost_poll_queue(&vq->poll);
8241a1e46 Jason Wang 2016-06-01 819 goto out;
8dd014adf David Stevens 2010-07-27 820 }
8dd014adf David Stevens 2010-07-27 821 }
8241a1e46 Jason Wang 2016-06-01 822 vhost_net_enable_vq(net, vq);
2e26af79b Asias He 2013-05-07 823 out:
f5a4941aa Jason Wang 2018-05-29 824 vhost_rx_signal_used(nvq);
8dd014adf David Stevens 2010-07-27 825 mutex_unlock(&vq->mutex);
8dd014adf David Stevens 2010-07-27 826 }
8dd014adf David Stevens 2010-07-27 827
:::::: The code at line 738 was first introduced by commit
:::::: f5a4941aa6d190e676065e8f4ed35999f52a01c3 vhost_net: flush batched heads before trying to busy polling
:::::: TO: Jason Wang <jasowang@redhat.com>
:::::: CC: David S. Miller <davem@davemloft.net>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
On 2018年07月04日 09:01, kbuild test robot wrote: > Hi Jason, > > I love your patch! Yet something to improve: > > [auto build test ERROR on net-next/master] > > url:https://github.com/0day-ci/linux/commits/Jason-Wang/Packed-virtqueue-for-vhost/20180703-154751 > config: x86_64-randconfig-s0-07032254 (attached as .config) > compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026 > reproduce: > # save the attached .config to linux build tree > make ARCH=x86_64 > > Note: the linux-review/Jason-Wang/Packed-virtqueue-for-vhost/20180703-154751 HEAD 01b902f1126212ea2597e6d09802bd9c4431bf82 builds fine. > It only hurts bisectibility. > > All errors (new ones prefixed by >>): > > drivers//vhost/net.c: In function 'handle_rx': >>> drivers//vhost/net.c:738:15: error: implicit declaration of function 'get_rx_bufs' [-Werror=implicit-function-declaration] > headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, > ^~~~~~~~~~~ > cc1: some warnings being treated as errors > > vim +/get_rx_bufs +738 drivers//vhost/net.c My bad, I forgot to do a one-by-one compiling test. Will post V2. Thanks
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 29756d8..712b134 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -685,83 +685,6 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) return len; } -/* This is a multi-buffer version of vhost_get_desc, that works if - * vq has read descriptors only. - * @vq - the relevant virtqueue - * @datalen - data length we'll be reading - * @iovcount - returned count of io vectors we fill - * @log - vhost log - * @log_num - log offset - * @quota - headcount quota, 1 for big buffer - * returns number of buffer heads allocated, negative on error - */ -static int get_rx_bufs(struct vhost_virtqueue *vq, - struct vring_used_elem *heads, - int datalen, - unsigned *iovcount, - struct vhost_log *log, - unsigned *log_num, - unsigned int quota) -{ - unsigned int out, in; - int seg = 0; - int headcount = 0; - unsigned d; - int r, nlogs = 0; - /* len is always initialized before use since we are always called with - * datalen > 0. 
- */ - u32 uninitialized_var(len); - - while (datalen > 0 && headcount < quota) { - if (unlikely(seg >= UIO_MAXIOV)) { - r = -ENOBUFS; - goto err; - } - r = vhost_get_vq_desc(vq, vq->iov + seg, - ARRAY_SIZE(vq->iov) - seg, &out, - &in, log, log_num); - if (unlikely(r < 0)) - goto err; - - d = r; - if (d == vq->num) { - r = 0; - goto err; - } - if (unlikely(out || in <= 0)) { - vq_err(vq, "unexpected descriptor format for RX: " - "out %d, in %d\n", out, in); - r = -EINVAL; - goto err; - } - if (unlikely(log)) { - nlogs += *log_num; - log += *log_num; - } - heads[headcount].id = cpu_to_vhost32(vq, d); - len = iov_length(vq->iov + seg, in); - heads[headcount].len = cpu_to_vhost32(vq, len); - datalen -= len; - ++headcount; - seg += in; - } - heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); - *iovcount = seg; - if (unlikely(log)) - *log_num = nlogs; - - /* Detect overrun */ - if (unlikely(datalen > 0)) { - r = UIO_MAXIOV + 1; - goto err; - } - return headcount; -err: - vhost_discard_vq_desc(vq, headcount); - return r; -} - /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a502f1a..8814e5b 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -2104,6 +2104,84 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); +/* This is a multi-buffer version of vhost_get_desc, that works if + * vq has read descriptors only. 
+ * @vq - the relevant virtqueue + * @datalen - data length we'll be reading + * @iovcount - returned count of io vectors we fill + * @log - vhost log + * @log_num - log offset + * @quota - headcount quota, 1 for big buffer + * returns number of buffer heads allocated, negative on error + */ +int vhost_get_bufs(struct vhost_virtqueue *vq, + struct vring_used_elem *heads, + int datalen, + unsigned *iovcount, + struct vhost_log *log, + unsigned *log_num, + unsigned int quota) +{ + unsigned int out, in; + int seg = 0; + int headcount = 0; + unsigned d; + int r, nlogs = 0; + /* len is always initialized before use since we are always called with + * datalen > 0. + */ + u32 uninitialized_var(len); + + while (datalen > 0 && headcount < quota) { + if (unlikely(seg >= UIO_MAXIOV)) { + r = -ENOBUFS; + goto err; + } + r = vhost_get_vq_desc(vq, vq->iov + seg, + ARRAY_SIZE(vq->iov) - seg, &out, + &in, log, log_num); + if (unlikely(r < 0)) + goto err; + + d = r; + if (d == vq->num) { + r = 0; + goto err; + } + if (unlikely(out || in <= 0)) { + vq_err(vq, "unexpected descriptor format for RX: " + "out %d, in %d\n", out, in); + r = -EINVAL; + goto err; + } + if (unlikely(log)) { + nlogs += *log_num; + log += *log_num; + } + heads[headcount].id = cpu_to_vhost32(vq, d); + len = iov_length(vq->iov + seg, in); + heads[headcount].len = cpu_to_vhost32(vq, len); + datalen -= len; + ++headcount; + seg += in; + } + heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); + *iovcount = seg; + if (unlikely(log)) + *log_num = nlogs; + + /* Detect overrun */ + if (unlikely(datalen > 0)) { + r = UIO_MAXIOV + 1; + goto err; + } + return headcount; +err: + vhost_discard_vq_desc(vq, headcount); + return r; +} +EXPORT_SYMBOL_GPL(vhost_get_bufs); + /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. 
*/ void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) { diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 6c844b9..52edd242 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -185,6 +185,13 @@ int vhost_get_vq_desc(struct vhost_virtqueue *, struct iovec iov[], unsigned int iov_count, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num); +int vhost_get_bufs(struct vhost_virtqueue *vq, + struct vring_used_elem *heads, + int datalen, + unsigned *iovcount, + struct vhost_log *log, + unsigned *log_num, + unsigned int quota); void vhost_discard_vq_desc(struct vhost_virtqueue *, int n); int vhost_vq_init_access(struct vhost_virtqueue *);
Move get_rx_bufs() to vhost.c and rename it to vhost_get_bufs(). This helps to hide the vring internal layout from device-specific implementations. The packed ring implementation will benefit from this. Signed-off-by: Jason Wang <jasowang@redhat.com> --- drivers/vhost/net.c | 77 -------------------------------------------------- drivers/vhost/vhost.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++ drivers/vhost/vhost.h | 7 +++++ 3 files changed, 85 insertions(+), 77 deletions(-)