Message ID | e76a29f77bb3f386e4a643c8af94b77b775d1752.1690100802.git.yin31149@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Series | Vhost-vdpa Shadow Virtqueue VLAN support |
On Sun, Jul 23, 2023 at 5:28 PM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> This patch introduces vhost_vdpa_net_load_single_vlan()
> and vhost_vdpa_net_load_vlan() to restore the vlan
> filtering state at device's startup.
>
> Co-developed-by: Eugenio Pérez <eperezma@redhat.com>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>

Acked-by: Jason Wang <jasowang@redhat.com>

But this seems to be a latency killer, as it may send up to 1024
commands.

As discussed in the past, we need a better cvq command to do this: for
example, a single command to carry a bitmap.

Thanks

> ---
> v2:
>   - remove the extra line pointed out by Eugenio
>
> v1: https://lore.kernel.org/all/0a568cc8a8d2b750c2e09b2237e9f05cece07c3f.1689690854.git.yin31149@gmail.com/
>
>  net/vhost-vdpa.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 48 insertions(+)
>
> [...]
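For reference, a sketch of what such a bitmap-carrying control command could look like. This is not part of the virtio specification or of QEMU; the structure, constant, and command names below are invented purely to illustrate the idea:

```c
#include <stdint.h>

#define VLAN_BITMAP_WORDS (4096 / 32)   /* 4096 VLAN IDs, 32 bits per word */

/* Hypothetical payload: the whole VLAN filter table in one command,
 * instead of one VIRTIO_NET_CTRL_VLAN_ADD per VLAN ID. */
struct virtio_net_ctrl_vlan_bitmap {
    uint32_t vlans[VLAN_BITMAP_WORDS];  /* bit v set => VLAN v passes */
};

/* With such a command, the device state could be restored in a single
 * CVQ round-trip, e.g. (HYPOTHETICAL_VLAN_SET_BITMAP is made up):
 *
 *   struct virtio_net_ctrl_vlan_bitmap map = { ... };
 *   const struct iovec data = { .iov_base = &map, .iov_len = sizeof(map) };
 *   vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
 *                           HYPOTHETICAL_VLAN_SET_BITMAP, &data, 1);
 */
```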
On 2023/7/25 14:47, Jason Wang wrote:
> On Sun, Jul 23, 2023 at 5:28 PM Hawkins Jiawei <yin31149@gmail.com> wrote:
>>
>> This patch introduces vhost_vdpa_net_load_single_vlan()
>> and vhost_vdpa_net_load_vlan() to restore the vlan
>> filtering state at device's startup.
>>
>> Co-developed-by: Eugenio Pérez <eperezma@redhat.com>
>> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
>> Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
>
> Acked-by: Jason Wang <jasowang@redhat.com>
>
> But this seems to be a latency killer, as it may send up to 1024
> commands.
>
> As discussed in the past, we need a better cvq command to do this: for
> example, a single command to carry a bitmap.

Hi Jason,

Thanks for your review.

You are right, we need some improvement here.

Therefore, I have submitted another patch series titled "vdpa: Send all
CVQ state load commands in parallel" at [1], which allows QEMU to delay
polling and checking the device's used buffers until either the SVQ is
full or the control command shadow buffers are full, so that QEMU can
send all the SVQ control commands in parallel. This gives a significant
performance improvement.

To test that patch series, I created 4094 VLANs in the guest to build an
environment for sending multiple CVQ state load commands. According to
the results on a real vdpa device at [2], that patch series improves the
latency from 23296 us to 6539 us.

Thanks!

[1]. https://lists.gnu.org/archive/html/qemu-devel/2023-07/msg03726.html
[2]. https://lists.gnu.org/archive/html/qemu-devel/2023-07/msg03947.html

> [...]
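As a rough, self-contained illustration of the batching idea described above. This is not the code from that series; the helper names (cvq_has_room(), cvq_enqueue(), cvq_poll_completions()) are invented stubs standing in for the real SVQ primitives:

```c
#include <stdbool.h>
#include <stddef.h>

typedef struct Cmd { int opcode; } Cmd;

/* Hypothetical primitives the real code would provide. */
bool cvq_has_room(void);           /* room left in the SVQ / shadow buffers? */
int  cvq_enqueue(const Cmd *cmd);  /* queue a command without waiting */
int  cvq_poll_completions(void);   /* wait for and check all used buffers */

/* Queue every state-load command without waiting for each completion;
 * only poll when the queue fills up, and once more at the end. */
int load_all(const Cmd *cmds, size_t n_cmds)
{
    for (size_t i = 0; i < n_cmds; i++) {
        if (!cvq_has_room()) {
            int r = cvq_poll_completions();
            if (r < 0) {
                return r;
            }
        }
        int r = cvq_enqueue(&cmds[i]);
        if (r < 0) {
            return r;
        }
    }
    return cvq_poll_completions();
}
```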
On Tue, Jul 25, 2023 at 3:48 PM Hawkins Jiawei <yin31149@gmail.com> wrote:
>
> [...]
>
> Therefore, I have submitted another patch series titled "vdpa: Send all
> CVQ state load commands in parallel" at [1], which allows QEMU to delay
> polling and checking the device's used buffers until either the SVQ is
> full or the control command shadow buffers are full, so that QEMU can
> send all the SVQ control commands in parallel. This gives a significant
> performance improvement.
>
> To test that patch series, I created 4094 VLANs in the guest to build an
> environment for sending multiple CVQ state load commands. According to
> the results on a real vdpa device at [2], that patch series improves the
> latency from 23296 us to 6539 us.
>
> Thanks!
>
> [1]. https://lists.gnu.org/archive/html/qemu-devel/2023-07/msg03726.html
> [2]. https://lists.gnu.org/archive/html/qemu-devel/2023-07/msg03947.html

That's great, and if we can use a single command to get/set vid it would
still be helpful (I meant we can extend the virtio spec).

Thanks

> [...]
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 9795306742..347241796d 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -965,6 +965,50 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
     return 0;
 }
 
+static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
+                                           const VirtIONet *n,
+                                           uint16_t vid)
+{
+    const struct iovec data = {
+        .iov_base = &vid,
+        .iov_len = sizeof(vid),
+    };
+    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
+                                                  VIRTIO_NET_CTRL_VLAN_ADD,
+                                                  &data, 1);
+    if (unlikely(dev_written < 0)) {
+        return dev_written;
+    }
+    if (unlikely(*s->status != VIRTIO_NET_OK)) {
+        return -EIO;
+    }
+
+    return 0;
+}
+
+static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
+                                    const VirtIONet *n)
+{
+    int r;
+
+    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
+        return 0;
+    }
+
+    for (int i = 0; i < MAX_VLAN >> 5; i++) {
+        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
+            if (n->vlans[i] & (1U << j)) {
+                r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
+                if (unlikely(r != 0)) {
+                    return r;
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
 static int vhost_vdpa_net_load(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -995,6 +1039,10 @@ static int vhost_vdpa_net_load(NetClientState *nc)
     if (unlikely(r)) {
         return r;
     }
+    r = vhost_vdpa_net_load_vlan(s, n);
+    if (unlikely(r)) {
+        return r;
+    }
 
     return 0;
 }
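To make the bitmap walk in vhost_vdpa_net_load_vlan() concrete: n->vlans[] stores one bit per VLAN ID in 32-bit words, so word index i and bit index j recombine into the VLAN ID as (i << 5) + j. A small standalone example of that mapping, independent of the QEMU types (MAX_VLAN mirrors QEMU's definition; the rest is illustrative):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VLAN (1 << 12)               /* 4096 VLAN IDs, per 802.1Q */

int main(void)
{
    uint32_t vlans[MAX_VLAN >> 5] = {0}; /* 128 words x 32 bits = 4096 bits */

    /* Mark VLAN 100 as allowed: word 100 >> 5 == 3, bit 100 & 0x1f == 4. */
    uint16_t vid = 100;
    vlans[vid >> 5] |= 1U << (vid & 0x1f);

    /* Walking the bitmap the same way as vhost_vdpa_net_load_vlan()
     * recovers the original VLAN ID. */
    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; vlans[i] && j <= 0x1f; j++) {
            if (vlans[i] & (1U << j)) {
                printf("restore VLAN %d\n", (i << 5) + j);
                assert((i << 5) + j == vid);
            }
        }
    }
    return 0;
}
```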