@@ -484,7 +484,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += cf->len;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
+ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
/*
* we have to stop the queue and deliver all messages in case
@@ -476,7 +476,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
priv->dlc[idx] = frame->len;
- can_put_echo_skb(skb, dev, idx);
+ can_put_echo_skb(skb, dev, idx, 0);
/* Update the active bits */
atomic_add((1 << idx), &priv->tx_active);
@@ -702,7 +702,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
stats->tx_bytes += cf->len;
stats->tx_packets++;
- can_put_echo_skb(priv->tx_skb, dev, 0);
+ can_put_echo_skb(priv->tx_skb, dev, 0, 0);
can_get_echo_skb(dev, 0);
priv->tx_skb = NULL;
@@ -38,7 +38,7 @@ void can_flush_echo_skb(struct net_device *dev)
* priv->echo_skb, if necessary.
*/
int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
+ unsigned int idx, unsigned int frame_len)
{
struct can_priv *priv = netdev_priv(dev);
@@ -62,6 +62,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->dev = dev;
+ /* save frame_len to reuse it when transmission is completed */
+ can_skb_prv(skb)->frame_len = frame_len;
+
/* save this skb for tx interrupt echo handling */
priv->echo_skb[idx] = skb;
} else {
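The hunk above is the core of the change: can_put_echo_skb() now records the caller-supplied frame_len in the skb's CAN private data (can_skb_prv(skb)->frame_len) so the TX-complete path can reuse it later, e.g. for byte queue limits (BQL). Every driver converted in this patch still passes 0; a driver that knows the data link layer length of the frame could pass that instead. A minimal sketch of such an xmit path, assuming a helper like can_skb_get_frame_len() (declared in <linux/can/length.h>) is available; foo_start_xmit() and the fixed echo index 0 are placeholders, not part of this patch:

#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/length.h>
#include <linux/netdevice.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int frame_len;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* data link layer length of the outgoing frame in bytes */
	frame_len = can_skb_get_frame_len(skb);

	/* save the skb and its frame_len for the TX-complete handler */
	can_put_echo_skb(skb, dev, 0, frame_len);

	/* open the BQL accounting for this frame */
	netdev_sent_queue(dev, frame_len);

	/* ... hand the frame to the hardware and start transmission ... */

	return NETDEV_TX_OK;
}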
@@ -815,7 +815,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
priv->write(can_id, &priv->tx_mb->can_id);
priv->write(ctrl, &priv->tx_mb->can_ctrl);
@@ -1448,7 +1448,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* taken.
*/
priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
- can_put_echo_skb(skb, dev, slotindex);
+ can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
* read from the memory
@@ -922,7 +922,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT);
writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US);
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
/* Start the transmission */
writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD);
@@ -778,7 +778,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx);
+ can_put_echo_skb(skb, netdev, can->echo_idx, 0);
/* Move echo index to the next slot */
can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
@@ -1483,7 +1483,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -1554,7 +1554,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx);
+ can_put_echo_skb(skb, dev, putidx, 0);
/* Enable TX FIFO element to start transfer */
m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
@@ -270,7 +270,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
- can_put_echo_skb(skb, dev, buf_id);
+ can_put_echo_skb(skb, dev, buf_id, 0);
/* Enable interrupt. */
priv->tx_active |= 1 << buf_id;
@@ -924,7 +924,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
&priv->regs->ifregs[1].data[i / 2]);
}
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
+ can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
/* Set the size of the data. Update if2_mcont */
iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
@@ -716,7 +716,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&priv->echo_lock, flags);
/* prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, ndev, priv->echo_idx);
+ can_put_echo_skb(skb, ndev, priv->echo_idx, 0);
/* move echo index to the next slot */
priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;
@@ -617,7 +617,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
* the CPU-side pointer for the transmit FIFO to the next
@@ -1390,7 +1390,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
- can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_head++;
@@ -318,7 +318,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
for (i = 0; i < cf->len; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
cmd_reg_val |= CMD_AT;
@@ -104,7 +104,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
card->tx.last_bus = priv->index;
++card->tx.pending;
++priv->tx.pending;
- can_put_echo_skb(skb, dev, priv->tx.echo_put);
+ can_put_echo_skb(skb, dev, priv->tx.echo_put, 0);
++priv->tx.echo_put;
if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
priv->tx.echo_put = 0;
@@ -586,7 +586,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -1002,7 +1002,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
priv->tx_len = 1 + frame->len;
- can_put_echo_skb(priv->tx_skb, net, 0);
+ can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
}
@@ -2436,7 +2436,7 @@ static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
netif_stop_queue(ndev);
- can_put_echo_skb(skb, ndev, tx_head);
+ can_put_echo_skb(skb, ndev, tx_head, 0);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
if (err)
@@ -448,7 +448,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR);
- can_put_echo_skb(skb, dev, 0);
+ can_put_echo_skb(skb, dev, 0, 0);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ);
@@ -513,7 +513,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
- can_put_echo_skb(skb, ndev, mbxno);
+ can_put_echo_skb(skb, ndev, mbxno, 0);
spin_lock_irqsave(&priv->mbx_lock, flags);
--priv->tx_head;
@@ -801,7 +801,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
@@ -783,7 +783,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_jobs);
@@ -525,7 +525,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, idx);
+ can_put_echo_skb(skb, netdev, idx, 0);
atomic_inc(&dev->active_tx_urbs);
@@ -578,7 +578,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
context->priv = priv;
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
@@ -355,7 +355,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;
- can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+ can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0);
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
@@ -365,7 +365,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
usb_anchor_urb(urb, &dev->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&dev->active_tx_urbs);
@@ -1137,7 +1137,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
/* put the skb on can loopback stack */
spin_lock_irqsave(&up->echo_skb_lock, flags);
- can_put_echo_skb(skb, up->netdev, echo_index);
+ can_put_echo_skb(skb, up->netdev, echo_index, 0);
spin_unlock_irqrestore(&up->echo_skb_lock, flags);
/* transmit it */
@@ -664,7 +664,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
- can_put_echo_skb(skb, netdev, context->echo_index);
+ can_put_echo_skb(skb, netdev, context->echo_index, 0);
atomic_inc(&priv->active_tx_urbs);
@@ -592,9 +592,9 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
(priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
else
- can_put_echo_skb(skb, ndev, 0);
+ can_put_echo_skb(skb, ndev, 0, 0);
priv->tx_head++;
@@ -18,7 +18,7 @@
void can_flush_echo_skb(struct net_device *dev);
int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
+ unsigned int idx, unsigned int frame_len);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
u8 *len_ptr, unsigned int *frame_len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
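The unchanged declaration of __can_get_echo_skb() above already takes a frame_len_ptr, which is how the value stored by can_put_echo_skb() is meant to come back out on TX completion. A minimal sketch of the matching completion side, assuming __can_get_echo_skb() copies the saved frame_len through frame_len_ptr; foo_tx_done() and the fixed echo index 0 are placeholders, not part of this patch:

#include <linux/can/skb.h>
#include <linux/netdevice.h>

static void foo_tx_done(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	unsigned int frame_len = 0;
	struct sk_buff *skb;
	u8 len;

	/* fetch the echoed skb together with the frame_len saved at xmit time */
	skb = __can_get_echo_skb(dev, 0, &len, &frame_len);
	if (!skb)
		return;

	/* close the BQL accounting opened by netdev_sent_queue() */
	netdev_completed_queue(dev, 1, frame_len);

	stats->tx_packets++;
	stats->tx_bytes += len;

	/* loop the echoed frame back to the networking stack */
	netif_rx(skb);
}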