[net-next,v1,04/19] virtio_net: move to virtio_net.h

Message ID 20231016120033.26933-5-xuanzhuo@linux.alibaba.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series virtio-net: support AF_XDP zero copy

Checks

Context Check Description
netdev/series_format fail Series longer than 15 patches (and no cover letter)
netdev/tree_selection success Clearly marked for net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1360 this patch: 1360
netdev/cc_maintainers success CCed 14 of 14 maintainers
netdev/build_clang success Errors and warnings before: 1385 this patch: 1385
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1387 this patch: 1387
netdev/checkpatch warning WARNING: added, moved or deleted file(s), does MAINTAINERS need updating?
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Xuan Zhuo Oct. 16, 2023, noon UTC
Move some structure definitions and inline functions into the
virtio_net.h file.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio/main.c       | 252 +------------------------------
 drivers/net/virtio/virtio_net.h | 256 ++++++++++++++++++++++++++++++++
 2 files changed, 258 insertions(+), 250 deletions(-)
 create mode 100644 drivers/net/virtio/virtio_net.h

Comments

Jason Wang Oct. 19, 2023, 6:12 a.m. UTC | #1
On Mon, Oct 16, 2023 at 8:00 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> Move some structure definitions and inline functions into the
> virtio_net.h file.

Some of these functions were not inline before the move. I'm not
sure what the criteria are for choosing which functions to move.


>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio/main.c       | 252 +------------------------------
>  drivers/net/virtio/virtio_net.h | 256 ++++++++++++++++++++++++++++++++
>  2 files changed, 258 insertions(+), 250 deletions(-)
>  create mode 100644 drivers/net/virtio/virtio_net.h
>
> diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
> index 6cf77b6acdab..d8b6c0d86f29 100644
> --- a/drivers/net/virtio/main.c
> +++ b/drivers/net/virtio/main.c
> @@ -6,7 +6,6 @@
>  //#define DEBUG
>  #include <linux/netdevice.h>
>  #include <linux/etherdevice.h>
> -#include <linux/ethtool.h>
>  #include <linux/module.h>
>  #include <linux/virtio.h>
>  #include <linux/virtio_net.h>
> @@ -16,7 +15,6 @@
>  #include <linux/if_vlan.h>
>  #include <linux/slab.h>
>  #include <linux/cpu.h>
> -#include <linux/average.h>
>  #include <linux/filter.h>
>  #include <linux/kernel.h>
>  #include <net/route.h>
> @@ -24,6 +22,8 @@
>  #include <net/net_failover.h>
>  #include <net/netdev_rx_queue.h>
>
> +#include "virtio_net.h"
> +
>  static int napi_weight = NAPI_POLL_WEIGHT;
>  module_param(napi_weight, int, 0444);
>
> @@ -45,15 +45,6 @@ module_param(napi_tx, bool, 0644);
>  #define VIRTIO_XDP_TX          BIT(0)
>  #define VIRTIO_XDP_REDIR       BIT(1)
>
> -#define VIRTIO_XDP_FLAG        BIT(0)
> -
> -/* RX packet size EWMA. The average packet size is used to determine the packet
> - * buffer size when refilling RX rings. As the entire RX ring may be refilled
> - * at once, the weight is chosen so that the EWMA will be insensitive to short-
> - * term, transient changes in packet size.
> - */
> -DECLARE_EWMA(pkt_len, 0, 64)
> -
>  #define VIRTNET_DRIVER_VERSION "1.0.0"
>
>  static const unsigned long guest_offloads[] = {
> @@ -74,36 +65,6 @@ static const unsigned long guest_offloads[] = {
>                                 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
>                                 (1ULL << VIRTIO_NET_F_GUEST_USO6))
>
> -struct virtnet_stat_desc {
> -       char desc[ETH_GSTRING_LEN];
> -       size_t offset;
> -};
> -
> -struct virtnet_sq_stats {
> -       struct u64_stats_sync syncp;
> -       u64 packets;
> -       u64 bytes;
> -       u64 xdp_tx;
> -       u64 xdp_tx_drops;
> -       u64 kicks;
> -       u64 tx_timeouts;
> -};
> -
> -struct virtnet_rq_stats {
> -       struct u64_stats_sync syncp;
> -       u64 packets;
> -       u64 bytes;
> -       u64 drops;
> -       u64 xdp_packets;
> -       u64 xdp_tx;
> -       u64 xdp_redirects;
> -       u64 xdp_drops;
> -       u64 kicks;
> -};
> -
> -#define VIRTNET_SQ_STAT(m)     offsetof(struct virtnet_sq_stats, m)
> -#define VIRTNET_RQ_STAT(m)     offsetof(struct virtnet_rq_stats, m)
> -
>  static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
>         { "packets",            VIRTNET_SQ_STAT(packets) },
>         { "bytes",              VIRTNET_SQ_STAT(bytes) },
> @@ -127,80 +88,6 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
>  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
>  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
>
> -struct virtnet_interrupt_coalesce {
> -       u32 max_packets;
> -       u32 max_usecs;
> -};
> -
> -/* The dma information of pages allocated at a time. */
> -struct virtnet_rq_dma {
> -       dma_addr_t addr;
> -       u32 ref;
> -       u16 len;
> -       u16 need_sync;
> -};
> -
> -/* Internal representation of a send virtqueue */
> -struct send_queue {
> -       /* Virtqueue associated with this send _queue */
> -       struct virtqueue *vq;
> -
> -       /* TX: fragments + linear part + virtio header */
> -       struct scatterlist sg[MAX_SKB_FRAGS + 2];
> -
> -       /* Name of the send queue: output.$index */
> -       char name[16];
> -
> -       struct virtnet_sq_stats stats;
> -
> -       struct virtnet_interrupt_coalesce intr_coal;
> -
> -       struct napi_struct napi;
> -
> -       /* Record whether sq is in reset state. */
> -       bool reset;
> -};
> -
> -/* Internal representation of a receive virtqueue */
> -struct receive_queue {
> -       /* Virtqueue associated with this receive_queue */
> -       struct virtqueue *vq;
> -
> -       struct napi_struct napi;
> -
> -       struct bpf_prog __rcu *xdp_prog;
> -
> -       struct virtnet_rq_stats stats;
> -
> -       struct virtnet_interrupt_coalesce intr_coal;
> -
> -       /* Chain pages by the private ptr. */
> -       struct page *pages;
> -
> -       /* Average packet length for mergeable receive buffers. */
> -       struct ewma_pkt_len mrg_avg_pkt_len;
> -
> -       /* Page frag for packet buffer allocation. */
> -       struct page_frag alloc_frag;
> -
> -       /* RX: fragments + linear part + virtio header */
> -       struct scatterlist sg[MAX_SKB_FRAGS + 2];
> -
> -       /* Min single buffer size for mergeable buffers case. */
> -       unsigned int min_buf_len;
> -
> -       /* Name of this receive queue: input.$index */
> -       char name[16];
> -
> -       struct xdp_rxq_info xdp_rxq;
> -
> -       /* Record the last dma info to free after new pages is allocated. */
> -       struct virtnet_rq_dma *last_dma;
> -
> -       /* Do dma by self */
> -       bool do_dma;
> -};
> -
>  /* This structure can contain rss message with maximum settings for indirection table and keysize
>   * Note, that default structure that describes RSS configuration virtio_net_rss_config
>   * contains same info but can't handle table values.
> @@ -234,88 +121,6 @@ struct control_buf {
>         struct virtio_net_ctrl_coal_vq coal_vq;
>  };
>
> -struct virtnet_info {
> -       struct virtio_device *vdev;
> -       struct virtqueue *cvq;
> -       struct net_device *dev;
> -       struct send_queue *sq;
> -       struct receive_queue *rq;
> -       unsigned int status;
> -
> -       /* Max # of queue pairs supported by the device */
> -       u16 max_queue_pairs;
> -
> -       /* # of queue pairs currently used by the driver */
> -       u16 curr_queue_pairs;
> -
> -       /* # of XDP queue pairs currently used by the driver */
> -       u16 xdp_queue_pairs;
> -
> -       /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> -       bool xdp_enabled;
> -
> -       /* I like... big packets and I cannot lie! */
> -       bool big_packets;
> -
> -       /* number of sg entries allocated for big packets */
> -       unsigned int big_packets_num_skbfrags;
> -
> -       /* Host will merge rx buffers for big packets (shake it! shake it!) */
> -       bool mergeable_rx_bufs;
> -
> -       /* Host supports rss and/or hash report */
> -       bool has_rss;
> -       bool has_rss_hash_report;
> -       u8 rss_key_size;
> -       u16 rss_indir_table_size;
> -       u32 rss_hash_types_supported;
> -       u32 rss_hash_types_saved;
> -
> -       /* Has control virtqueue */
> -       bool has_cvq;
> -
> -       /* Host can handle any s/g split between our header and packet data */
> -       bool any_header_sg;
> -
> -       /* Packet virtio header size */
> -       u8 hdr_len;
> -
> -       /* Work struct for delayed refilling if we run low on memory. */
> -       struct delayed_work refill;
> -
> -       /* Is delayed refill enabled? */
> -       bool refill_enabled;
> -
> -       /* The lock to synchronize the access to refill_enabled */
> -       spinlock_t refill_lock;
> -
> -       /* Work struct for config space updates */
> -       struct work_struct config_work;
> -
> -       /* Does the affinity hint is set for virtqueues? */
> -       bool affinity_hint_set;
> -
> -       /* CPU hotplug instances for online & dead */
> -       struct hlist_node node;
> -       struct hlist_node node_dead;
> -
> -       struct control_buf *ctrl;
> -
> -       /* Ethtool settings */
> -       u8 duplex;
> -       u32 speed;
> -
> -       /* Interrupt coalescing settings */
> -       struct virtnet_interrupt_coalesce intr_coal_tx;
> -       struct virtnet_interrupt_coalesce intr_coal_rx;
> -
> -       unsigned long guest_offloads;
> -       unsigned long guest_offloads_capable;
> -
> -       /* failover when STANDBY feature enabled */
> -       struct failover *failover;
> -};
> -
>  struct padded_vnet_hdr {
>         struct virtio_net_hdr_v1_hash hdr;
>         /*
> @@ -337,45 +142,11 @@ struct virtio_net_common_hdr {
>  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
>  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>
> -static bool is_xdp_frame(void *ptr)
> -{
> -       return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> -}
> -
>  static void *xdp_to_ptr(struct xdp_frame *ptr)
>  {
>         return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
>  }

Any reason for not moving this?

Thanks

>
Xuan Zhuo Oct. 19, 2023, 7:16 a.m. UTC | #2
On Thu, 19 Oct 2023 14:12:55 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Oct 16, 2023 at 8:00 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > Move some structure definitions and inline functions into the
> > virtio_net.h file.
>
> Some of these functions were not inline before the move. I'm not
> sure what the criteria are for choosing which functions to move.


Those will be used by xsk.c, or by other functions in this header, in
subsequent commits.

If that is confusing, I can instead move each function only when it is
needed. This commit just moves some important structures.

Thanks.

Jason Wang Oct. 20, 2023, 6:59 a.m. UTC | #3
On Thu, Oct 19, 2023 at 3:20 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Thu, 19 Oct 2023 14:12:55 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Mon, Oct 16, 2023 at 8:00 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > Move some structure definitions and inline functions into the
> > > virtio_net.h file.
> >
> > Some of these functions were not inline before the move. I'm not
> > sure what the criteria are for choosing which functions to move.
>
>
> Those will be used by xsk.c, or by other functions in this header, in
> subsequent commits.
>
> If that is confusing, I can instead move each function only when it is
> needed. This commit just moves some important structures.

That's fine.

Thanks
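
For context, the is_xdp_frame()/xdp_to_ptr()/ptr_to_xdp() helpers discussed
above rely on pointer tagging: sk_buff and xdp_frame pointers handed to the
virtqueue are at least word-aligned, so the lowest address bit is always free
to carry a "this is an XDP frame" flag (VIRTIO_XDP_FLAG). The standalone
sketch below only illustrates the technique; the names are made up and it is
not driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define XDP_FLAG 0x1UL	/* plays the role of VIRTIO_XDP_FLAG (BIT(0)) */

/* Tag a pointer before queueing it: set the low bit to mark "XDP frame". */
static void *tag_xdp(void *frame)
{
	return (void *)((uintptr_t)frame | XDP_FLAG);
}

/* Test the tag when the buffer comes back from the virtqueue. */
static int is_tagged_xdp(void *ptr)
{
	return ((uintptr_t)ptr & XDP_FLAG) != 0;
}

/* Clear the tag to recover the original pointer. */
static void *untag(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~XDP_FLAG);
}

int main(void)
{
	long frame;			/* stands in for a struct xdp_frame */
	void *queued = tag_xdp(&frame);	/* what would be queued on the vq */

	assert(is_tagged_xdp(queued));
	assert(untag(queued) == (void *)&frame);
	printf("tagged %p -> untagged %p\n", queued, untag(queued));
	return 0;
}

Because the virtqueue stores a bare void *, this is how the driver can mix
skbs and XDP frames on the same TX queue and still tell them apart at
completion time.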

Patch

diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
index 6cf77b6acdab..d8b6c0d86f29 100644
--- a/drivers/net/virtio/main.c
+++ b/drivers/net/virtio/main.c
@@ -6,7 +6,6 @@ 
 //#define DEBUG
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/ethtool.h>
 #include <linux/module.h>
 #include <linux/virtio.h>
 #include <linux/virtio_net.h>
@@ -16,7 +15,6 @@ 
 #include <linux/if_vlan.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
-#include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/kernel.h>
 #include <net/route.h>
@@ -24,6 +22,8 @@ 
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
 
+#include "virtio_net.h"
+
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
@@ -45,15 +45,6 @@  module_param(napi_tx, bool, 0644);
 #define VIRTIO_XDP_TX		BIT(0)
 #define VIRTIO_XDP_REDIR	BIT(1)
 
-#define VIRTIO_XDP_FLAG	BIT(0)
-
-/* RX packet size EWMA. The average packet size is used to determine the packet
- * buffer size when refilling RX rings. As the entire RX ring may be refilled
- * at once, the weight is chosen so that the EWMA will be insensitive to short-
- * term, transient changes in packet size.
- */
-DECLARE_EWMA(pkt_len, 0, 64)
-
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
 static const unsigned long guest_offloads[] = {
@@ -74,36 +65,6 @@  static const unsigned long guest_offloads[] = {
 				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
 				(1ULL << VIRTIO_NET_F_GUEST_USO6))
 
-struct virtnet_stat_desc {
-	char desc[ETH_GSTRING_LEN];
-	size_t offset;
-};
-
-struct virtnet_sq_stats {
-	struct u64_stats_sync syncp;
-	u64 packets;
-	u64 bytes;
-	u64 xdp_tx;
-	u64 xdp_tx_drops;
-	u64 kicks;
-	u64 tx_timeouts;
-};
-
-struct virtnet_rq_stats {
-	struct u64_stats_sync syncp;
-	u64 packets;
-	u64 bytes;
-	u64 drops;
-	u64 xdp_packets;
-	u64 xdp_tx;
-	u64 xdp_redirects;
-	u64 xdp_drops;
-	u64 kicks;
-};
-
-#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
-
 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 	{ "packets",		VIRTNET_SQ_STAT(packets) },
 	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
@@ -127,80 +88,6 @@  static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
 #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
 
-struct virtnet_interrupt_coalesce {
-	u32 max_packets;
-	u32 max_usecs;
-};
-
-/* The dma information of pages allocated at a time. */
-struct virtnet_rq_dma {
-	dma_addr_t addr;
-	u32 ref;
-	u16 len;
-	u16 need_sync;
-};
-
-/* Internal representation of a send virtqueue */
-struct send_queue {
-	/* Virtqueue associated with this send _queue */
-	struct virtqueue *vq;
-
-	/* TX: fragments + linear part + virtio header */
-	struct scatterlist sg[MAX_SKB_FRAGS + 2];
-
-	/* Name of the send queue: output.$index */
-	char name[16];
-
-	struct virtnet_sq_stats stats;
-
-	struct virtnet_interrupt_coalesce intr_coal;
-
-	struct napi_struct napi;
-
-	/* Record whether sq is in reset state. */
-	bool reset;
-};
-
-/* Internal representation of a receive virtqueue */
-struct receive_queue {
-	/* Virtqueue associated with this receive_queue */
-	struct virtqueue *vq;
-
-	struct napi_struct napi;
-
-	struct bpf_prog __rcu *xdp_prog;
-
-	struct virtnet_rq_stats stats;
-
-	struct virtnet_interrupt_coalesce intr_coal;
-
-	/* Chain pages by the private ptr. */
-	struct page *pages;
-
-	/* Average packet length for mergeable receive buffers. */
-	struct ewma_pkt_len mrg_avg_pkt_len;
-
-	/* Page frag for packet buffer allocation. */
-	struct page_frag alloc_frag;
-
-	/* RX: fragments + linear part + virtio header */
-	struct scatterlist sg[MAX_SKB_FRAGS + 2];
-
-	/* Min single buffer size for mergeable buffers case. */
-	unsigned int min_buf_len;
-
-	/* Name of this receive queue: input.$index */
-	char name[16];
-
-	struct xdp_rxq_info xdp_rxq;
-
-	/* Record the last dma info to free after new pages is allocated. */
-	struct virtnet_rq_dma *last_dma;
-
-	/* Do dma by self */
-	bool do_dma;
-};
-
 /* This structure can contain rss message with maximum settings for indirection table and keysize
  * Note, that default structure that describes RSS configuration virtio_net_rss_config
  * contains same info but can't handle table values.
@@ -234,88 +121,6 @@  struct control_buf {
 	struct virtio_net_ctrl_coal_vq coal_vq;
 };
 
-struct virtnet_info {
-	struct virtio_device *vdev;
-	struct virtqueue *cvq;
-	struct net_device *dev;
-	struct send_queue *sq;
-	struct receive_queue *rq;
-	unsigned int status;
-
-	/* Max # of queue pairs supported by the device */
-	u16 max_queue_pairs;
-
-	/* # of queue pairs currently used by the driver */
-	u16 curr_queue_pairs;
-
-	/* # of XDP queue pairs currently used by the driver */
-	u16 xdp_queue_pairs;
-
-	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
-	bool xdp_enabled;
-
-	/* I like... big packets and I cannot lie! */
-	bool big_packets;
-
-	/* number of sg entries allocated for big packets */
-	unsigned int big_packets_num_skbfrags;
-
-	/* Host will merge rx buffers for big packets (shake it! shake it!) */
-	bool mergeable_rx_bufs;
-
-	/* Host supports rss and/or hash report */
-	bool has_rss;
-	bool has_rss_hash_report;
-	u8 rss_key_size;
-	u16 rss_indir_table_size;
-	u32 rss_hash_types_supported;
-	u32 rss_hash_types_saved;
-
-	/* Has control virtqueue */
-	bool has_cvq;
-
-	/* Host can handle any s/g split between our header and packet data */
-	bool any_header_sg;
-
-	/* Packet virtio header size */
-	u8 hdr_len;
-
-	/* Work struct for delayed refilling if we run low on memory. */
-	struct delayed_work refill;
-
-	/* Is delayed refill enabled? */
-	bool refill_enabled;
-
-	/* The lock to synchronize the access to refill_enabled */
-	spinlock_t refill_lock;
-
-	/* Work struct for config space updates */
-	struct work_struct config_work;
-
-	/* Does the affinity hint is set for virtqueues? */
-	bool affinity_hint_set;
-
-	/* CPU hotplug instances for online & dead */
-	struct hlist_node node;
-	struct hlist_node node_dead;
-
-	struct control_buf *ctrl;
-
-	/* Ethtool settings */
-	u8 duplex;
-	u32 speed;
-
-	/* Interrupt coalescing settings */
-	struct virtnet_interrupt_coalesce intr_coal_tx;
-	struct virtnet_interrupt_coalesce intr_coal_rx;
-
-	unsigned long guest_offloads;
-	unsigned long guest_offloads_capable;
-
-	/* failover when STANDBY feature enabled */
-	struct failover *failover;
-};
-
 struct padded_vnet_hdr {
 	struct virtio_net_hdr_v1_hash hdr;
 	/*
@@ -337,45 +142,11 @@  struct virtio_net_common_hdr {
 static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 
-static bool is_xdp_frame(void *ptr)
-{
-	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
-}
-
 static void *xdp_to_ptr(struct xdp_frame *ptr)
 {
 	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
 }
 
-static struct xdp_frame *ptr_to_xdp(void *ptr)
-{
-	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
-}
-
-static void __free_old_xmit(struct send_queue *sq, bool in_napi,
-			    struct virtnet_sq_stats *stats)
-{
-	unsigned int len;
-	void *ptr;
-
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (!is_xdp_frame(ptr)) {
-			struct sk_buff *skb = ptr;
-
-			pr_debug("Sent skb %p\n", skb);
-
-			stats->bytes += skb->len;
-			napi_consume_skb(skb, in_napi);
-		} else {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			stats->bytes += xdp_get_frame_len(frame);
-			xdp_return_frame(frame);
-		}
-		stats->packets++;
-	}
-}
-
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -446,15 +217,6 @@  static void disable_delayed_refill(struct virtnet_info *vi)
 	spin_unlock_bh(&vi->refill_lock);
 }
 
-static void virtqueue_napi_schedule(struct napi_struct *napi,
-				    struct virtqueue *vq)
-{
-	if (napi_schedule_prep(napi)) {
-		virtqueue_disable_cb(vq);
-		__napi_schedule(napi);
-	}
-}
-
 static void virtqueue_napi_complete(struct napi_struct *napi,
 				    struct virtqueue *vq, int processed)
 {
@@ -786,16 +548,6 @@  static void free_old_xmit(struct send_queue *sq, bool in_napi)
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void check_sq_full_and_disable(struct virtnet_info *vi,
 				      struct net_device *dev,
 				      struct send_queue *sq)
diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
new file mode 100644
index 000000000000..ddaf0ecf4d9d
--- /dev/null
+++ b/drivers/net/virtio/virtio_net.h
@@ -0,0 +1,256 @@ 
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __VIRTIO_NET_H__
+#define __VIRTIO_NET_H__
+
+#include <linux/ethtool.h>
+#include <linux/average.h>
+
+#define VIRTIO_XDP_FLAG	BIT(0)
+
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
+ */
+DECLARE_EWMA(pkt_len, 0, 64)
+
+struct virtnet_stat_desc {
+	char desc[ETH_GSTRING_LEN];
+	size_t offset;
+};
+
+struct virtnet_sq_stats {
+	struct u64_stats_sync syncp;
+	u64 packets;
+	u64 bytes;
+	u64 xdp_tx;
+	u64 xdp_tx_drops;
+	u64 kicks;
+	u64 tx_timeouts;
+};
+
+struct virtnet_rq_stats {
+	struct u64_stats_sync syncp;
+	u64 packets;
+	u64 bytes;
+	u64 drops;
+	u64 xdp_packets;
+	u64 xdp_tx;
+	u64 xdp_redirects;
+	u64 xdp_drops;
+	u64 kicks;
+};
+
+#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
+#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
+
+struct virtnet_interrupt_coalesce {
+	u32 max_packets;
+	u32 max_usecs;
+};
+
+/* The dma information of pages allocated at a time. */
+struct virtnet_rq_dma {
+	dma_addr_t addr;
+	u32 ref;
+	u16 len;
+	u16 need_sync;
+};
+
+/* Internal representation of a send virtqueue */
+struct send_queue {
+	/* Virtqueue associated with this send _queue */
+	struct virtqueue *vq;
+
+	/* TX: fragments + linear part + virtio header */
+	struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+	/* Name of the send queue: output.$index */
+	char name[16];
+
+	struct virtnet_sq_stats stats;
+
+	struct virtnet_interrupt_coalesce intr_coal;
+
+	struct napi_struct napi;
+
+	/* Record whether sq is in reset state. */
+	bool reset;
+};
+
+/* Internal representation of a receive virtqueue */
+struct receive_queue {
+	/* Virtqueue associated with this receive_queue */
+	struct virtqueue *vq;
+
+	struct napi_struct napi;
+
+	struct bpf_prog __rcu *xdp_prog;
+
+	struct virtnet_rq_stats stats;
+
+	struct virtnet_interrupt_coalesce intr_coal;
+
+	/* Chain pages by the private ptr. */
+	struct page *pages;
+
+	/* Average packet length for mergeable receive buffers. */
+	struct ewma_pkt_len mrg_avg_pkt_len;
+
+	/* Page frag for packet buffer allocation. */
+	struct page_frag alloc_frag;
+
+	/* RX: fragments + linear part + virtio header */
+	struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+	/* Min single buffer size for mergeable buffers case. */
+	unsigned int min_buf_len;
+
+	/* Name of this receive queue: input.$index */
+	char name[16];
+
+	struct xdp_rxq_info xdp_rxq;
+
+	/* Record the last dma info to free after new pages is allocated. */
+	struct virtnet_rq_dma *last_dma;
+
+	/* Do dma by self */
+	bool do_dma;
+};
+
+struct virtnet_info {
+	struct virtio_device *vdev;
+	struct virtqueue *cvq;
+	struct net_device *dev;
+	struct send_queue *sq;
+	struct receive_queue *rq;
+	unsigned int status;
+
+	/* Max # of queue pairs supported by the device */
+	u16 max_queue_pairs;
+
+	/* # of queue pairs currently used by the driver */
+	u16 curr_queue_pairs;
+
+	/* # of XDP queue pairs currently used by the driver */
+	u16 xdp_queue_pairs;
+
+	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
+	bool xdp_enabled;
+
+	/* I like... big packets and I cannot lie! */
+	bool big_packets;
+
+	/* number of sg entries allocated for big packets */
+	unsigned int big_packets_num_skbfrags;
+
+	/* Host will merge rx buffers for big packets (shake it! shake it!) */
+	bool mergeable_rx_bufs;
+
+	/* Host supports rss and/or hash report */
+	bool has_rss;
+	bool has_rss_hash_report;
+	u8 rss_key_size;
+	u16 rss_indir_table_size;
+	u32 rss_hash_types_supported;
+	u32 rss_hash_types_saved;
+
+	/* Has control virtqueue */
+	bool has_cvq;
+
+	/* Host can handle any s/g split between our header and packet data */
+	bool any_header_sg;
+
+	/* Packet virtio header size */
+	u8 hdr_len;
+
+	/* Work struct for delayed refilling if we run low on memory. */
+	struct delayed_work refill;
+
+	/* Is delayed refill enabled? */
+	bool refill_enabled;
+
+	/* The lock to synchronize the access to refill_enabled */
+	spinlock_t refill_lock;
+
+	/* Work struct for config space updates */
+	struct work_struct config_work;
+
+	/* Does the affinity hint is set for virtqueues? */
+	bool affinity_hint_set;
+
+	/* CPU hotplug instances for online & dead */
+	struct hlist_node node;
+	struct hlist_node node_dead;
+
+	struct control_buf *ctrl;
+
+	/* Ethtool settings */
+	u8 duplex;
+	u32 speed;
+
+	/* Interrupt coalescing settings */
+	struct virtnet_interrupt_coalesce intr_coal_tx;
+	struct virtnet_interrupt_coalesce intr_coal_rx;
+
+	unsigned long guest_offloads;
+	unsigned long guest_offloads_capable;
+
+	/* failover when STANDBY feature enabled */
+	struct failover *failover;
+};
+
+static inline bool is_xdp_frame(void *ptr)
+{
+	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static inline struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
+static inline void __free_old_xmit(struct send_queue *sq, bool in_napi,
+				   struct virtnet_sq_stats *stats)
+{
+	unsigned int len;
+	void *ptr;
+
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (!is_xdp_frame(ptr)) {
+			struct sk_buff *skb = ptr;
+
+			pr_debug("Sent skb %p\n", skb);
+
+			stats->bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			stats->bytes += xdp_get_frame_len(frame);
+			xdp_return_frame(frame);
+		}
+		stats->packets++;
+	}
+}
+
+static inline void virtqueue_napi_schedule(struct napi_struct *napi,
+					   struct virtqueue *vq)
+{
+	if (napi_schedule_prep(napi)) {
+		virtqueue_disable_cb(vq);
+		__napi_schedule(napi);
+	}
+}
+
+static inline bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+#endif
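
As mentioned in the discussion above, these helpers are exported in a private
header so that later patches in the series (for example an AF_XDP xsk.c) can
reuse them. Purely as a hypothetical illustration of such a consumer (the file
name, function name and exact usage below are assumptions, not part of this
patch), a user of the header might look like:

/* Hypothetical drivers/net/virtio/xsk.c fragment - illustration only. */
#include "virtio_net.h"

/* Reclaim completed TX buffers on an AF_XDP queue, reusing the shared
 * __free_old_xmit() helper so that skb and xdp_frame completions are
 * accounted the same way as on the regular transmit path.
 */
static void virtnet_xsk_clean_tx(struct send_queue *sq, bool in_napi)
{
	struct virtnet_sq_stats stats = {};

	__free_old_xmit(sq, in_napi, &stats);

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += stats.bytes;
	sq->stats.packets += stats.packets;
	u64_stats_update_end(&sq->stats.syncp);
}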