@@ -856,7 +856,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
+ can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
dev->stats.tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
}
@@ -733,7 +733,7 @@ static void c_can_do_tx(struct net_device *dev)
pend &= ~(1 << idx);
obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
c_can_inval_tx_object(dev, IF_RX, obj);
- can_get_echo_skb(dev, idx);
+ can_get_echo_skb(dev, idx, NULL);
bytes += priv->dlc[idx];
pkts++;
}
@@ -703,7 +703,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
stats->tx_packets++;
can_put_echo_skb(priv->tx_skb, dev, 0, 0);
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
priv->tx_skb = NULL;
netif_wake_queue(dev);
@@ -121,12 +121,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
* is handled in the device driver. The driver must protect
* access to priv->echo_skb, if necessary.
*/
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
u8 len;
- skb = __can_get_echo_skb(dev, idx, &len, NULL);
+ skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
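
The new frame_len_ptr argument lets a caller retrieve the echoed frame's length instead of discarding it. As a rough illustration only (the driver name and handler below are hypothetical, not part of this series), a TX-completion path that wants the frame length, e.g. for byte-queue-limit accounting, could pass a pointer rather than NULL:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

/* Hypothetical sketch: not a driver in this patch set. */
static void foo_can_tx_done(struct net_device *dev, unsigned int idx)
{
	struct net_device_stats *stats = &dev->stats;
	unsigned int frame_len;

	/* Return value is the CAN data length; frame_len receives the
	 * on-wire frame length stored by can_put_echo_skb().
	 */
	stats->tx_bytes += can_get_echo_skb(dev, idx, &frame_len);
	stats->tx_packets++;

	/* Report completed bytes to BQL using the echoed frame length. */
	netdev_completed_queue(dev, 1, frame_len);
}

Existing callers that do not care about the frame length simply pass NULL, as in the hunks below.
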
@@ -517,7 +517,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
stats->tx_packets++;
stats->tx_bytes += priv->txdlc[i];
priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i);
+ can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i);
@@ -629,7 +629,7 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
/* TX IRQ */
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
- stats->tx_bytes += can_get_echo_skb(ndev, 0);
+ stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
}
@@ -1467,7 +1467,7 @@ static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
struct net_device_stats *stats = &can->can.dev->stats;
stats->tx_bytes += dlc;
@@ -1533,7 +1533,7 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
- int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx, NULL);
u8 count = ioread32(can->reg_base +
KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
@@ -930,7 +930,7 @@ static void m_can_echo_tx_event(struct net_device *dev)
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
stats->tx_packets++;
}
}
@@ -972,7 +972,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
- stats->tx_bytes += can_get_echo_skb(dev, 0);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
@@ -448,7 +448,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
out_8(&regs->cantbsel, mask);
stats->tx_bytes += in_8(&regs->tx.dlr);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id);
+ can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
@@ -711,7 +711,7 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
struct net_device_stats *stats = &(priv->ndev->stats);
u32 dlc;
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->ifregs[1].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
@@ -266,7 +266,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client);
+ can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
stats->tx_bytes += cf_len;
@@ -386,7 +386,7 @@ static void rcar_can_tx_done(struct net_device *ndev)
stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
RCAR_CAN_FIFO_DEPTH];
priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
priv->tx_tail++;
netif_wake_queue(ndev);
}
@@ -1044,7 +1044,7 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
stats->tx_packets++;
stats->tx_bytes += priv->tx_len[sent];
priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent);
+ can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
@@ -531,7 +531,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
stats->tx_bytes +=
priv->read_reg(priv, SJA1000_FI) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
@@ -284,7 +284,7 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get);
+ can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
@@ -725,7 +725,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
@@ -1171,7 +1171,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
net->stats.tx_bytes += priv->tx_len - 1;
can_led_event(net, CAN_LED_EVENT_TX);
if (priv->tx_len) {
- can_get_echo_skb(net, 0);
+ can_get_echo_skb(net, 0, NULL);
priv->tx_len = 0;
}
netif_wake_queue(net);
@@ -655,7 +655,7 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
readl(priv->base +
SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
stats->tx_packets++;
- can_get_echo_skb(dev, 0);
+ can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
@@ -518,7 +518,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -357,7 +357,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!msg->msg.txdone.status) {
stats->tx_packets++;
stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index);
@@ -370,7 +370,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id);
+ can_get_echo_skb(netdev, hf->echo_id, NULL);
gs_free_tx_context(txc);
@@ -1151,7 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
@@ -594,7 +594,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index);
+ can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
@@ -237,7 +237,7 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_bytes += ctx->dlc;
can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx);
+ can_get_echo_skb(netdev, ctx->ndx, NULL);
}
if (urb->status)
@@ -309,7 +309,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
/* do wakeup tx queue in case of success only */
@@ -672,7 +672,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* update statistics */
up->netdev->stats.tx_packets++;
up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index);
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index);
@@ -585,7 +585,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += context->dlc;
- can_get_echo_skb(netdev, context->echo_index);
+ can_get_echo_skb(netdev, context->echo_index, NULL);
can_led_event(netdev, CAN_LED_EVENT_TX);
@@ -1292,7 +1292,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
while (frames_sent--) {
stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
- priv->tx_max);
+ priv->tx_max, NULL);
priv->tx_tail++;
stats->tx_packets++;
}
@@ -21,7 +21,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx, unsigned int frame_len);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
u8 *len_ptr, unsigned int *frame_len_ptr);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
struct sk_buff *alloc_canfd_skb(struct net_device *dev,