@@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->max_frs;
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
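
With this change the RX maxlen sent over the mailbox tracks the currently
configured MTU instead of the cached max_frs value. A minimal sketch of the
arithmetic, assuming OTX2_ETH_HLEN covers an Ethernet header plus two VLAN
tags (22 bytes) and OTX2_HW_TIMESTAMP_LEN is the 8-byte hardware timestamp
prepended to received frames:

    /* Sketch only; constant values assumed from the driver's definitions */
    #define OTX2_ETH_HLEN         22      /* Ethernet + 2 VLAN tags (assumed) */
    #define OTX2_HW_TIMESTAMP_LEN  8      /* HW RX timestamp (assumed) */

    /* Largest frame the NIX RX path must accept for a given MTU */
    static inline unsigned int otx2_rx_max_len(unsigned int mtu)
    {
            return mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
    }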
@@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
u64 schq, parent;
u64 dwrr_val;
- dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
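
mtu_to_dwrr_weight() turns a packet length into a weight for the transmit
scheduler's deficit weighted round robin. The helper itself is outside this
hunk; a hedged sketch of the usual round-up division against the hardware's
DWRR quantum (the dwrr_mtu parameter name here is an assumption):

    /* Sketch: weight = ceil(pktlen / DWRR quantum) */
    static u64 mtu_to_dwrr_weight_sketch(u32 dwrr_mtu, u32 pktlen)
    {
            u64 weight = pktlen / dwrr_mtu;

            if (pktlen % dwrr_mtu)
                    weight++;
            return weight;
    }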
@@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology etc configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
- | OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
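
NIX_AF_SMQX_CFG packs both length bounds into one 64-bit register, with the
minimum length in the low bits and the maximum starting at bit 8; the u64
cast keeps the shift out of 32-bit arithmetic. A sketch of the value being
built, with the 0x20/0x80/0x2 writes left as the driver's literal constants:

    /* Sketch of the SMQ config value assembled above */
    static u64 otx2_smq_cfg_sketch(u16 tx_max_pktlen, u8 min_mtu)
    {
            u64 regval = ((u64)tx_max_pktlen << 8) | min_mtu;

            /* remaining fields kept as the driver's magic constants */
            regval |= (0x20ULL << 51) | (0x80ULL << 39) | (0x2ULL << 36);
            return regval;
    }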
@@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQs to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -326,7 +326,7 @@ struct otx2_nic {
struct net_device *netdev;
struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
@@ -1312,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
* NIX transfers entire data using 6 segments/buffers and writes
* a CQE_RX descriptor with those segment addresses. First segment
* has additional data prepended to the packet. Also software reserves a
- * headroom of 128 bytes and sizeof(struct skb_shared_info) in
- * each segment. Hence the total size of memory needed
- * to receive a packet with 'mtu' is:
+ * headroom of 128 bytes in each segment. Hence the total size of
+ * memory needed to receive a packet with 'mtu' is:
* frame size = mtu + additional data;
- * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * memory = frame_size + headroom * 6;
* each receive buffer size = memory / 6;
*/
frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
- total_size = frame_size + (OTX2_HEAD_ROOM +
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
rbuf_size = total_size / 6;
return ALIGN(rbuf_size, 2048);
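
For a standard 1500-byte MTU, and assuming OTX2_ETH_HLEN = 22,
OTX2_HW_TIMESTAMP_LEN = 8 and OTX2_HEAD_ROOM = 128, the new computation
works out as:

    frame_size = 1500 + 22 + 8;        /* 1530 */
    total_size = frame_size + 128 * 6; /* 2298 */
    rbuf_size  = total_size / 6;       /* 383  */
    /* ALIGN(383, 2048): each of the 6 receive buffers ends up 2048 bytes */

The per-segment skb_shared_info reservation can be dropped since fragment
pages attached with skb_add_rx_frag() share the skb head's shared info
rather than carrying their own copy.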
@@ -1343,7 +1341,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Maximum transmit length supported by hardware */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
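
tx_max_pktlen is derived from the device's advertised max_mtu, making it a
fixed hardware bound that does not need recomputing when the MTU changes.
Purely illustrative, with a hypothetical max_mtu of 9190:

    /* Hypothetical values: netdev->max_mtu = 9190, OTX2_ETH_HLEN = 22 */
    pf->tx_max_pktlen = 9190 + 22;  /* 9212 bytes, constant across MTU changes */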
@@ -1807,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
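
Note the check still lets GSO packets through regardless of length: they are
segmented to wire-sized frames before transmission, so only non-GSO skbs
must fit within tx_max_pktlen. The predicate, restated as a sketch:

    /* Sketch: true if the skb should be dropped by the length check */
    static bool otx2_xmit_len_invalid(const struct sk_buff *skb,
                                      u16 tx_max_pktlen)
    {
            return skb->len <= ETH_HLEN ||
                   (!skb_shinfo(skb)->gso_size &&
                    skb->len > tx_max_pktlen);
    }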
@@ -181,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -203,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If a packet spans more than MAX_SKB_FRAGS fragments, give the
+ * buffer pointers back to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
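
The bool return lets the caller distinguish buffers attached to the skb
(which must be refilled via pool_ptrs) from buffers recycled straight to the
aura. The guard matters because a frame is spread across fixed-size receive
buffers, so a large enough frame can arrive in more segments than an skb can
hold. A rough sketch of the segment count, reusing the DMA_BUFFER_LEN()
macro changed at the end of this patch:

    /* Sketch: RX segments needed for one frame of frame_len bytes,
     * given DMA_BUFFER_LEN(rbsize) usable bytes per receive buffer
     */
    static int otx2_segs_needed(int frame_len, int rbsize)
    {
            return DIV_ROUND_UP(frame_len, DMA_BUFFER_LEN(rbsize));
    }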
@@ -349,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
seg_addr = &sg->seg_addr;
seg_size = (void *)sg;
for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
- otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
- parse);
- cq->pool_ptrs++;
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
}
start += sizeof(*sg);
}
@@ -39,9 +39,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
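
With the skb_shared_info reservation gone, each receive buffer offers more
usable payload. A worked example with assumed values rbsize = 2048 and
OTX2_HEAD_ROOM = 128:

    DMA_BUFFER_LEN(2048);   /* now 2048 - 128 = 1920 usable bytes */
    /* before this patch, OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)),
     * a few hundred bytes depending on arch and config, was subtracted too
     */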