@@ -17,6 +17,8 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <linux/if_vlan.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -158,6 +160,7 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_error; /* page alloc error */
};
struct enic_wq {
@@ -169,6 +172,7 @@ struct enic_wq {
struct enic_rq {
struct vnic_rq vrq;
struct enic_rq_stats stats;
+ struct page_pool *pool;
} ____cacheline_aligned;
/* Per-instance private data structure */
@@ -231,8 +235,14 @@ struct enic {
void *opaque);
int (*rq_alloc_buf)(struct vnic_rq *rq);
void (*rq_free_buf)(struct vnic_rq *rq, struct vnic_rq_buf *buf);
+ void (*rq_cleanup)(struct enic_rq *rq);
};
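+/* Largest frame the RX path must hold in a single buffer: the device MTU
+ * plus Ethernet header and one VLAN tag.
+ */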
+static inline unsigned int get_max_pkt_len(struct enic *enic)
+{
+ return enic->netdev->mtu + VLAN_ETH_HLEN;
+}
+
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
struct enic *enic = vdev->priv;
@@ -51,6 +51,7 @@ static const struct enic_stat enic_per_rq_stats[] = {
ENIC_PER_RQ_STAT(napi_repoll),
ENIC_PER_RQ_STAT(no_skb),
ENIC_PER_RQ_STAT(desc_skip),
+ ENIC_PER_RQ_STAT(pp_alloc_error),
};
#define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
@@ -1282,6 +1282,11 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
return -EMSGSIZE;
}
+/* nothing to do for the legacy buffer based allocation */
+static void enic_rq_buf_cleanup(struct enic_rq *rq)
+{
+}
+
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -1881,10 +1886,33 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
-
- enic->rq_buf_service = enic_rq_indicate_buf;
- enic->rq_alloc_buf = enic_rq_alloc_buf;
- enic->rq_free_buf = enic_free_rq_buf;
+ bool use_page_pool;
+ struct page_pool_params pp_params = { 0 };
+
+ /* Use the Page Pool API when a full frame (MTU plus headers) fits in
+ * a single page; otherwise fall back to the legacy buffer allocation.
+ */
+ use_page_pool = (get_max_pkt_len(enic) <= PAGE_SIZE);
+
+ if (use_page_pool) {
+ pp_params.order = 0;
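+ /* let the pool cache up to one page per RX descriptor */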
+ pp_params.pool_size = enic->config.rq_desc_count;
+ pp_params.nid = dev_to_node(&enic->pdev->dev);
+ pp_params.dev = &enic->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ pp_params.netdev = netdev;
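+ /* the pool maps the pages for DMA and syncs them for the device
+ * before they are reused
+ */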
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+
+ enic->rq_buf_service = enic_rq_indicate_page;
+ enic->rq_alloc_buf = enic_rq_alloc_page;
+ enic->rq_free_buf = enic_rq_free_page;
+ enic->rq_cleanup = enic_rq_page_cleanup;
+ } else {
+ enic->rq_buf_service = enic_rq_indicate_buf;
+ enic->rq_alloc_buf = enic_rq_alloc_buf;
+ enic->rq_free_buf = enic_free_rq_buf;
+ enic->rq_cleanup = enic_rq_buf_cleanup;
+ }
err = enic_request_intr(enic);
if (err) {
@@ -1902,6 +1930,13 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ if (use_page_pool) {
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+ }
+
/* enable rq before updating rq desc */
vnic_rq_enable(&enic->rq[i].vrq);
vnic_rq_fill(&enic->rq[i].vrq, enic->rq_alloc_buf);
@@ -1942,8 +1977,10 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
ret = vnic_rq_disable(&enic->rq[i].vrq);
- if (!ret)
+ if (!ret) {
vnic_rq_clean(&enic->rq[i].vrq, enic->rq_free_buf);
+ enic->rq_cleanup(&enic->rq[i]);
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2001,8 +2038,10 @@ static int enic_stop(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
vnic_rq_clean(&enic->rq[i].vrq, enic->rq_free_buf);
+ enic->rq_cleanup(&enic->rq[i]);
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -7,6 +7,7 @@
#include "enic_rq.h"
#include "vnic_rq.h"
#include "cq_enet_desc.h"
+#include "enic_res.h"
#define ENIC_LARGE_PKT_THRESHOLD 1000
@@ -118,3 +119,143 @@ int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
+
+void enic_rq_page_cleanup(struct enic_rq *rq)
+{
+ struct vnic_rq *vrq = &rq->vrq;
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct napi_struct *napi = &enic->napi[vrq->index];
+
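+ /* free any skb still being assembled from RX fragments so its pages
+ * are released before the pool is destroyed
+ */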
+ napi_free_frags(napi);
+ page_pool_destroy(rq->pool);
+ rq->pool = NULL;
+}
+
+void enic_rq_free_page(struct vnic_rq *vrq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq *rq = &enic->rq[vrq->index];
+
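+ /* return a posted but unused buffer to the pool (RQ is being cleaned) */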
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_page(rq->pool, (struct page *)buf->os_buf,
+ get_max_pkt_len(enic), true);
+ buf->os_buf = NULL;
+}
+
+int enic_rq_alloc_page(struct vnic_rq *vrq)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq *rq = &enic->rq[vrq->index];
+ struct enic_rq_stats *rqstats = &rq->stats;
+ struct vnic_rq_buf *buf = vrq->to_use;
+ dma_addr_t dma_addr;
+ struct page *page;
+ unsigned int offset = 0;
+ unsigned int len;
+ unsigned int truesize;
+
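+ /* each buffer must hold a full frame; the pool may return a fragment
+ * of a page when that size is small enough
+ */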
+ len = get_max_pkt_len(enic);
+ truesize = len;
+
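+ /* reuse a page still attached to this descriptor, e.g. one kept back
+ * after an error completion
+ */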
+ if (buf->os_buf) {
+ dma_addr = buf->dma_addr;
+ } else {
+ page = page_pool_dev_alloc(rq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_error++;
+ return -ENOMEM;
+ }
+ buf->os_buf = (void *)page;
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ }
+
+ enic_queue_rq_desc(vrq, buf->os_buf, dma_addr, len);
+
+ return 0;
+}
+
+/* Free the page fragments making up a packet that completed with an
+ * error.
+ */
+static void enic_rq_error_reset(struct vnic_rq *vrq)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct napi_struct *napi = &enic->napi[vrq->index];
+
+ napi_free_frags(napi);
+}
+
+void enic_rq_indicate_page(struct vnic_rq *vrq, struct cq_desc *cq_desc,
+ struct vnic_rq_buf *buf, int skipped, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct sk_buff *skb;
+ struct enic_rq *rq = &enic->rq[vrq->index];
+ struct enic_rq_stats *rqstats = &rq->stats;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, vrq->index)];
+ struct napi_struct *napi;
+ u8 type, color, eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ if (skipped) {
+ rqstats->desc_skip++;
+ return;
+ }
+
+ if (!buf || !buf->dma_addr) {
+ net_warn_ratelimited("%s[%u]: !buf || !buf->dma_addr!!\n",
+ enic->netdev->name, vrq->index);
+ return;
+ }
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
+ &type, &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop, &rss_type,
+ &csum_not_calc, &rss_hash, &bytes_written,
+ &packet_error, &vlan_stripped, &vlan_tci, &checksum,
+ &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
+ &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
+ &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
+ &fcs_ok);
+
+ if (enic_rq_pkt_error(vrq, packet_error, fcs_ok, bytes_written)) {
+ enic_rq_error_reset(vrq);
+ return;
+ }
+
+ napi = &enic->napi[vrq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, vrq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
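+ /* make the data written by the NIC visible to the CPU */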
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, bytes_written,
+ DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, (struct page *)buf->os_buf,
+ buf->offset, bytes_written, buf->truesize);
+
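+ /* the skb owns the page now; it is recycled back to the pool when
+ * the skb is freed (see skb_mark_for_recycle below)
+ */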
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+
+ enic_rq_set_skb_flags(vrq, type, rss_hash, rss_type, fcoe, fcoe_fc_crc_ok,
+ vlan_stripped, csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter, skb->len);
+ skb_mark_for_recycle(skb);
+ skb_record_rx_queue(skb, vrq->index);
+ napi_gro_frags(napi);
+ rqstats->packets++;
+ rqstats->bytes += bytes_written;
+}
@@ -19,4 +19,9 @@ int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
u8 type, u16 q_number, u16 completed_index, void *opaque);
void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
struct vnic_rq_buf *buf, int skipped, void *opaque);
+void enic_rq_indicate_page(struct vnic_rq *rq, struct cq_desc *cq_desc,
+ struct vnic_rq_buf *buf, int skipped, void *opaque);
+int enic_rq_alloc_page(struct vnic_rq *rq);
+void enic_rq_free_page(struct vnic_rq *rq, struct vnic_rq_buf *buf);
+void enic_rq_page_cleanup(struct enic_rq *rq);
#endif /* _ENIC_RQ_H_ */
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
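+ /* page pool bookkeeping: fragment offset within the page and its
+ * truesize as reported by the pool
+ */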
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {