Message ID | 20240325214912.323749-3-danielj@nvidia.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to | Netdev Maintainers |
Series | Remove RTNL lock protection of CVQ |
Context | Check | Description |
---|---|---|
netdev/tree_selection | success | Clearly marked for net-next |
netdev/apply | fail | Patch does not apply to net-next-0 |
On 2024/3/26 5:49 AM, Daniel Jurgens wrote:
> Allocate memory for the data when it's used. Ideally the could be on the
> stack, but we can't DMA stack memory. With this change only the header
> and status memory are shared between commands, which will allow using a
> tighter lock than RTNL.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> ---
> drivers/net/virtio_net.c | 110 ++++++++++++++++++++++++++-------------
> 1 file changed, 74 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 7419a68cf6e2..6780479a1e6c 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -235,14 +235,6 @@ struct virtio_net_ctrl_rss {
> struct control_buf {
> struct virtio_net_ctrl_hdr hdr;
> virtio_net_ctrl_ack status;
> - struct virtio_net_ctrl_mq mq;
> - u8 promisc;
> - u8 allmulti;
> - __virtio16 vid;
> - __virtio64 offloads;
> - struct virtio_net_ctrl_coal_tx coal_tx;
> - struct virtio_net_ctrl_coal_rx coal_rx;
> - struct virtio_net_ctrl_coal_vq coal_vq;
> };
>
> struct virtnet_info {
> @@ -2654,14 +2646,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
>
> static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
> {
> + struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
> struct scatterlist sg;
> struct net_device *dev = vi->dev;
>
> if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
> return 0;
>
> - vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
> - sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
> + mq = kzalloc(sizeof(*mq), GFP_KERNEL);
> + if (!mq)
> + return -ENOMEM;
> +
> + mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
> + sg_init_one(&sg, mq, sizeof(*mq));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
> VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
> @@ -2708,6 +2705,7 @@ static int virtnet_close(struct net_device *dev)
>
> static void virtnet_set_rx_mode(struct net_device *dev)

You need to rebase on the latest net-next branch for the next version;
.ndo_set_rx_mode has been converted to use a workqueue there.

Regards,
Heng

> {
> + u8 *promisc_allmulti __free(kfree) = NULL;
> struct virtnet_info *vi = netdev_priv(dev);
> struct scatterlist sg[2];
> struct virtio_net_ctrl_mac *mac_data;
> @@ -2721,22 +2719,27 @@ static void virtnet_set_rx_mode(struct net_device *dev)
> if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
> return;
>
> - vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
> - vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
> + promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
> + if (!promisc_allmulti) {
> + dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
> + return;
> + }
>
> - sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
> + *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
> + sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
> VIRTIO_NET_CTRL_RX_PROMISC, sg))
> dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
> - vi->ctrl->promisc ? "en" : "dis");
> + *promisc_allmulti ? "en" : "dis");
>
> - sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
> + *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
> + sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
> VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
> dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
> - vi->ctrl->allmulti ? "en" : "dis");
> + *promisc_allmulti ? "en" : "dis");
>
> uc_count = netdev_uc_count(dev);
> mc_count = netdev_mc_count(dev);
> @@ -2780,10 +2783,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
> __be16 proto, u16 vid)
> {
> struct virtnet_info *vi = netdev_priv(dev);
> + __virtio16 *_vid __free(kfree) = NULL;
> struct scatterlist sg;
>
> - vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
> - sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
> + _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
> + if (!_vid)
> + return -ENOMEM;
> +
> + *_vid = cpu_to_virtio16(vi->vdev, vid);
> + sg_init_one(&sg, _vid, sizeof(*_vid));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
> VIRTIO_NET_CTRL_VLAN_ADD, &sg))
> @@ -2795,10 +2803,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
> __be16 proto, u16 vid)
> {
> struct virtnet_info *vi = netdev_priv(dev);
> + __virtio16 *_vid __free(kfree) = NULL;
> struct scatterlist sg;
>
> - vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
> - sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
> + _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
> + if (!_vid)
> + return -ENOMEM;
> +
> + *_vid = cpu_to_virtio16(vi->vdev, vid);
> + sg_init_one(&sg, _vid, sizeof(*_vid));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
> VIRTIO_NET_CTRL_VLAN_DEL, &sg))
> @@ -2911,12 +2924,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
> static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
> u16 vqn, u32 max_usecs, u32 max_packets)
> {
> + struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
> struct scatterlist sgs;
>
> - vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
> - vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
> - vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
> - sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
> + coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
> + if (!coal_vq)
> + return -ENOMEM;
> +
> + coal_vq->vqn = cpu_to_le16(vqn);
> + coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
> + coal_vq->coal.max_packets = cpu_to_le32(max_packets);
> + sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
> VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
> @@ -3062,11 +3080,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
> vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
> - : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
> - dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
> - return false;
> - }
> + : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
> + goto err;
> +
> return true;
> +
> +err:
> + dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
> + return false;
> +
> }
>
> static void virtnet_init_default_rss(struct virtnet_info *vi)
> @@ -3371,12 +3393,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
> static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
> struct ethtool_coalesce *ec)
> {
> + struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
> struct scatterlist sgs_tx;
> int i;
>
> - vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
> - vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
> - sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
> + coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
> + if (!coal_tx)
> + return -ENOMEM;
> +
> + coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
> + coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
> + sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
> VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
> @@ -3396,6 +3423,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
> static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
> struct ethtool_coalesce *ec)
> {
> + struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
> bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
> struct scatterlist sgs_rx;
> int i;
> @@ -3414,6 +3442,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
> return 0;
> }
>
> + coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
> + if (!coal_rx)
> + return -ENOMEM;
> +
> if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
> vi->rx_dim_enabled = false;
> for (i = 0; i < vi->max_queue_pairs; i++)
> @@ -3424,9 +3456,9 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
> * we need apply the global new params even if they
> * are not updated.
> */
> - vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
> - vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
> - sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
> + coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
> + coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
> + sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
> VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
> @@ -3893,10 +3925,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
>
> static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
> {
> + u64 *_offloads __free(kfree) = NULL;
> struct scatterlist sg;
> - vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
>
> - sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
> + _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
> + if (!_offloads)
> + return -ENOMEM;
> +
> + *_offloads = cpu_to_virtio64(vi->vdev, offloads);
> +
> + sg_init_one(&sg, _offloads, sizeof(*_offloads));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
> VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
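For readers who have not met the scoped-cleanup idiom the patch leans on: a variable declared with __free(kfree) (the guard machinery behind the kernel's linux/cleanup.h) has kfree() run on it automatically when it goes out of scope, so every early return above frees its allocation without an explicit error path. Below is a minimal userspace sketch of the same idea, with the compiler's cleanup attribute and calloc()/free() standing in for kzalloc()/kfree(); all names in it are invented for illustration, it is not kernel code.

    #include <stdio.h>
    #include <stdlib.h>

    /* Rough stand-in for DEFINE_FREE(kfree, ...) + __free(kfree): the cleanup
     * attribute calls free_ptr() with the address of the variable when it
     * leaves scope; free(NULL) is a no-op, so a failed allocation is safe too.
     */
    static void free_ptr(void *p)
    {
            free(*(void **)p);
    }
    #define __free_ptr __attribute__((cleanup(free_ptr)))

    struct ctrl_mq {
            unsigned short virtqueue_pairs;
    };

    static int set_queues(unsigned short queue_pairs)
    {
            struct ctrl_mq *mq __free_ptr = NULL;

            mq = calloc(1, sizeof(*mq));
            if (!mq)
                    return -1;      /* no leak: cleanup runs on this return */

            mq->virtqueue_pairs = queue_pairs;
            printf("MQ command for %u queue pairs\n", mq->virtqueue_pairs);
            return 0;               /* mq is freed here as well, no free() call */
    }

    int main(void)
    {
            return set_queues(4) ? EXIT_FAILURE : EXIT_SUCCESS;
    }

The payoff in the patch is the same: each command helper allocates a DMA-able buffer, can bail out at any point, and never needs a kfree() at the end.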
On Mon, Mar 25, 2024 at 04:49:09PM -0500, Daniel Jurgens wrote:
> Allocate memory for the data when it's used. Ideally the could be on the
> stack, but we can't DMA stack memory. With this change only the header
> and status memory are shared between commands, which will allow using a
> tighter lock than RTNL.
>
> Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>

...

> @@ -3893,10 +3925,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
>
> static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
> {
> + u64 *_offloads __free(kfree) = NULL;
> struct scatterlist sg;
> - vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
>
> - sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
> + _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
> + if (!_offloads)
> + return -ENOMEM;
> +
> + *_offloads = cpu_to_virtio64(vi->vdev, offloads);

Hi Daniel,

There is a type mismatch between *_offloads and cpu_to_virtio64
which is flagged by Sparse as follows:

.../virtio_net.c:3978:20: warning: incorrect type in assignment (different base types)
.../virtio_net.c:3978:20:    expected unsigned long long [usertype]
.../virtio_net.c:3978:20:    got restricted __virtio64

I think this can be addressed by changing the type of *_offloads to
__virtio64 *.

> +
> + sg_init_one(&sg, _offloads, sizeof(*_offloads));
>
> if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
> VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
> --
> 2.42.0
>
>
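To reproduce the report locally, Sparse runs through the normal kbuild hook; assuming sparse is installed, a single-file check along these lines should flag the same assignment:

    make C=2 drivers/net/virtio_net.o

C=1 only checks files that are being recompiled, while C=2 forces the check even when the object is already up to date.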
On Thu, Mar 28, 2024 at 01:35:16PM +0000, Simon Horman wrote:
> On Mon, Mar 25, 2024 at 04:49:09PM -0500, Daniel Jurgens wrote:
> > Allocate memory for the data when it's used. Ideally the could be on the
> > stack, but we can't DMA stack memory. With this change only the header
> > and status memory are shared between commands, which will allow using a
> > tighter lock than RTNL.
> >
> > Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
> > Reviewed-by: Jiri Pirko <jiri@nvidia.com>
>
> ...
>
> > @@ -3893,10 +3925,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
> >
> > static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
> > {
> > + u64 *_offloads __free(kfree) = NULL;
> > struct scatterlist sg;
> > - vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
> >
> > - sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
> > + _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
> > + if (!_offloads)
> > + return -ENOMEM;
> > +
> > + *_offloads = cpu_to_virtio64(vi->vdev, offloads);
>
> Hi Daniel,
>
> There is a type mismatch between *_offloads and cpu_to_virtio64
> which is flagged by Sparse as follows:
>
> .../virtio_net.c:3978:20: warning: incorrect type in assignment (different base types)
> .../virtio_net.c:3978:20:    expected unsigned long long [usertype]
> .../virtio_net.c:3978:20:    got restricted __virtio64
>
> I think this can be addressed by changing the type of *_offloads to
> __virtio64 *.

Yes please, endianness is easier to get right the first time than to fix afterwards.

> > +
> > + sg_init_one(&sg, _offloads, sizeof(*_offloads));
> >
> > if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
> > VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
> > --
> > 2.42.0
> >
> >
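Concretely, the fix being asked for amounts to a one-line type change in the next revision; sketched here for reference, this is not a posted patch:

    - u64 *_offloads __free(kfree) = NULL;
    + __virtio64 *_offloads __free(kfree) = NULL;

With the declaration carrying the __virtio64 annotation, the later *_offloads = cpu_to_virtio64(vi->vdev, offloads); assignment matches on both sides and the Sparse warning goes away; sizeof(*_offloads) is still 8 bytes, so the allocated buffer and the sg_init_one() call are unaffected.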
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7419a68cf6e2..6780479a1e6c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -235,14 +235,6 @@ struct virtio_net_ctrl_rss {
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
virtio_net_ctrl_ack status;
- struct virtio_net_ctrl_mq mq;
- u8 promisc;
- u8 allmulti;
- __virtio16 vid;
- __virtio64 offloads;
- struct virtio_net_ctrl_coal_tx coal_tx;
- struct virtio_net_ctrl_coal_rx coal_rx;
- struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
@@ -2654,14 +2646,19 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
+ struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
struct net_device *dev = vi->dev;

if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;

- vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return -ENOMEM;
+
+ mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, mq, sizeof(*mq));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2708,6 +2705,7 @@ static int virtnet_close(struct net_device *dev)

static void virtnet_set_rx_mode(struct net_device *dev)
{
+ u8 *promisc_allmulti __free(kfree) = NULL;
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
@@ -2721,22 +2719,27 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;

- vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
+ if (!promisc_allmulti) {
+ dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+ return;
+ }

- sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+ *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl->promisc ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");

- sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+ *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl->allmulti ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");

uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
@@ -2780,10 +2783,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;

- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2795,10 +2803,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;

- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2911,12 +2924,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 vqn, u32 max_usecs, u32 max_packets)
{
+ struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
struct scatterlist sgs;

- vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
- vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
- vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
- sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+ coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+ if (!coal_vq)
+ return -ENOMEM;
+
+ coal_vq->vqn = cpu_to_le16(vqn);
+ coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+ coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -3062,11 +3080,15 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
- : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
- dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
- return false;
- }
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+ goto err;
+
return true;
+
+err:
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
+
}

static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3371,12 +3393,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
struct scatterlist sgs_tx;
int i;

- vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
- sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+ coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+ if (!coal_tx)
+ return -ENOMEM;
+
+ coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -3396,6 +3423,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
int i;
@@ -3414,6 +3442,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
return 0;
}

+ coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+ if (!coal_rx)
+ return -ENOMEM;
+
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
for (i = 0; i < vi->max_queue_pairs; i++)
@@ -3424,9 +3456,9 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
* we need apply the global new params even if they
* are not updated.
*/
- vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
- sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+ coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -3893,10 +3925,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)

static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
+ u64 *_offloads __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);

- sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+ _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+ if (!_offloads)
+ return -ENOMEM;
+
+ *_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+ sg_init_one(&sg, _offloads, sizeof(*_offloads));

if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {