@@ -4136,18 +4136,23 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
/**
* stmmac_tso_allocator - close entry point of the driver
* @priv: driver private structure
- * @des: buffer start address
+ * @addr: virtual address of skb->data, or pointer to the skb fragment (skb_frag_t) to map
* @total_len: total length to fill in descriptors
* @last_segment: condition for the last descriptor
* @queue: TX queue index
+ * @is_skb_frag: true when @addr points to an skb fragment, false for skb->data
* Description:
* This function fills descriptor and request new descriptors according to
* buffer length to fill
+ * Return:
+ * 0 on success, negative errno on DMA mapping failure
*/
-static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
- int total_len, bool last_segment, u32 queue)
+static int stmmac_tso_allocator(struct stmmac_priv *priv, void *addr,
+ int total_len, bool last_segment, u32 queue, bool is_skb_frag)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ unsigned char *data = addr;
+ unsigned int offset = 0;
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
@@ -4161,20 +4166,42 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
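+ /* Each descriptor carries at most TSO_MAX_BUFF_SIZE bytes of the buffer */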
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
if (tx_q->tbs & STMMAC_TBS_AVAIL)
desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
else
desc = &tx_q->dma_tx[tx_q->cur_tx];
- curr_addr = des + (total_len - tmp_len);
+ offset = total_len - tmp_len;
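+ /* Map this chunk: dma_map_single() for linear skb data,
+ * skb_frag_dma_map() for paged fragments
+ */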
+ if (!is_skb_frag) {
+ curr_addr = dma_map_single(priv->device, data + offset, buff_size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(priv->device, curr_addr))
+ return -ENOMEM;
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
+ } else {
+ curr_addr = skb_frag_dma_map(priv->device, addr, offset,
+ buff_size,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(priv->device, curr_addr))
+ return -ENOMEM;
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+ }
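+ /* Record the mapping so it can be unmapped on TX completion */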
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = curr_addr;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = buff_size;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+
if (priv->dma_cap.addr64 <= 32)
desc->des0 = cpu_to_le32(curr_addr);
else
stmmac_set_desc_addr(priv, desc, curr_addr);
- buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
- TSO_MAX_BUFF_SIZE : tmp_len;
-
stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
0, 1,
(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
@@ -4182,6 +4209,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
tmp_len -= TSO_MAX_BUFF_SIZE;
}
+ return 0;
}
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
@@ -4351,25 +4379,23 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
pay_len = 0;
}
- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
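+ /* Program descriptors for the linear payload; the DMA mapping
+ * itself is done inside stmmac_tso_allocator()
+ */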
+ if (priv->dma_cap.addr64 <= 32) {
+ if (stmmac_tso_allocator(priv, skb->data,
+ tmp_pay_len, nfrags == 0, queue, false))
+ goto dma_map_err;
+ } else {
+ if (stmmac_tso_allocator(priv, (skb->data + proto_hdr_len),
+ tmp_pay_len, nfrags == 0, queue, false))
+ goto dma_map_err;
+ }
/* Prepare fragments */
for (i = 0; i < nfrags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- des = skb_frag_dma_map(priv->device, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, des))
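+ /* stmmac_tso_allocator() maps the fragment and records it in tx_skbuff_dma */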
+ if (stmmac_tso_allocator(priv, frag, skb_frag_size(frag),
+ (i == nfrags - 1), queue, true))
goto dma_map_err;
-
- stmmac_tso_allocator(priv, des, skb_frag_size(frag),
- (i == nfrags - 1), queue);
-
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
- tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
- tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
}
tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;