@@ -17,6 +17,7 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -158,6 +159,7 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
};
struct enic_wq {
@@ -169,6 +171,7 @@ struct enic_wq {
struct enic_rq {
struct vnic_rq vrq;
struct enic_rq_stats stats;
+ struct page_pool *pool;
} ____cacheline_aligned;
/* Per-instance private data structure */
@@ -1735,6 +1735,18 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
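+ /* one page pool per RQ; the pool DMA-maps its pages and syncs them for the device */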
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
@@ -1752,6 +1764,17 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
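+ /* tie the pool to this RQ's NAPI so pages freed from softirq context recycle directly */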
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
vnic_rq_enable(&enic->rq[i].vrq);
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
@@ -1792,8 +1815,10 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
ret = vnic_rq_disable(&enic->rq[i].vrq);
- if (!ret)
+ if (!ret) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -1851,8 +1876,10 @@ static int enic_stop(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2362,6 +2389,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
rxs->hw_drop_overruns = rqstats->pkt_truncated;
rxs->csum_unnecessary = rqstats->csum_unnecessary +
rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
}
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -2389,6 +2417,7 @@ static void enic_get_base_stats(struct net_device *dev,
rxs->hw_drops = 0;
rxs->hw_drop_overruns = 0;
rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
txs->bytes = 0;
txs->packets = 0;
txs->csum_none = 0;
@@ -21,25 +21,6 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
pkt_size->small_pkt_bytes_cnt += pkt_len;
}
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index, void *opaque)
{
@@ -142,11 +123,15 @@ int enic_rq_alloc_buf(struct vnic_rq *rq)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
unsigned int os_buf_index = 0;
dma_addr_t dma_addr;
struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
if (buf->os_buf) {
enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
@@ -154,20 +139,17 @@ int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- enic->rq[rq->index].stats.no_skb++;
- return -ENOMEM;
- }
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
return -ENOMEM;
}
-
- enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);
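+ /* record where the frame sits inside the (possibly order > 0) page for the completion path */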
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
return 0;
}
@@ -175,13 +157,13 @@ int enic_rq_alloc_buf(struct vnic_rq *rq)
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
if (!buf->os_buf)
return;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
buf->os_buf = NULL;
}
@@ -189,10 +171,10 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
struct vnic_rq_buf *buf, int skipped, void *opaque)
{
struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -208,8 +190,6 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
return;
}
- skb = buf->os_buf;
-
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color,
&q_number, &completed_index, &ingress_port, &fcoe,
&eop, &sop, &rss_type, &csum_not_calc, &rss_hash,
@@ -219,48 +199,49 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
&tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
&fcs_ok);
- if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written)) {
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
return;
- }
if (eop && bytes_written > 0) {
/* Good receive
*/
rqstats->bytes += bytes_written;
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
}
+
prefetch(skb->data - NET_IP_ALIGN);
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
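+ /* attach the pool page as an skb frag; truesize is the pool chunk size, not the frame length */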
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
skb_record_rx_queue(skb, q_number);
enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
fcoe_fc_crc_ok, vlan_stripped,
csum_not_calc, tcp_udp_csum_ok, ipv6,
ipv4_csum_ok, vlan_tci, skb);
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
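+ /* let the stack return the page to the pool when the skb is freed */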
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_intr_update_pkt_size(&cq->pkt_size_counter,
bytes_written);
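+ /* the skb owns the page now; clear the ring slot so refill pulls a fresh pool page */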
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
} else {
/* Buffer overflow
*/
rqstats->pkt_truncated++;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
}
}
@@ -61,6 +61,9 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
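+ /* page pool mode: frame offset within the page and the pool-reported truesize */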
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {