for (int i = 0; i != cdev->can.echo_skb_max; ++i)
                can_free_echo_skb(cdev->net, i, NULL);
 
+       netdev_reset_queue(cdev->net);
+
        spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
        cdev->tx_fifo_in_flight = 0;
        spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
  * echo. timestamp is used for peripherals to ensure correct ordering
  * by rx-offload, and is ignored for non-peripherals.
  */
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
-                                 unsigned int msg_mark,
-                                 u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+                                         unsigned int msg_mark, u32 timestamp)
 {
        struct net_device *dev = cdev->net;
        struct net_device_stats *stats = &dev->stats;
+       /* The echo-skb helpers below only write *frame_len when an echo skb
+        * is actually stored at msg_mark; initialize to 0 so the error path
+        * (missing/already-freed echo skb) does not return an uninitialized
+        * value into the caller's BQL accounting (netdev_completed_queue()).
+        */
+       unsigned int frame_len = 0;
 
        if (cdev->is_peripheral)
                stats->tx_bytes +=
                        can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
                                                                    msg_mark,
                                                                    timestamp,
-                                                                   NULL);
+                                                                   &frame_len);
        else
-               stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+               stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
 
        stats->tx_packets++;
+
+       /* Frame length recorded by can_put_echo_skb() at xmit time; used by
+        * m_can_finish_tx() for netdev_completed_queue().
+        */
+       return frame_len;
 }
 
-static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted)
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+                           unsigned int transmitted_frame_len)
 {
        unsigned long irqflags;
 
+       netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
        spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
        if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
                netif_wake_queue(cdev->net);
        int err = 0;
        unsigned int msg_mark;
        int processed = 0;
+       unsigned int processed_frame_len = 0;
 
        struct m_can_classdev *cdev = netdev_priv(dev);
 
                fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
 
                /* update stats */
-               m_can_tx_update_stats(cdev, msg_mark, timestamp);
+               processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+                                                            timestamp);
+
                ++processed;
        }
 
                m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
                                                          ack_fgi));
 
-       m_can_finish_tx(cdev, processed);
+       m_can_finish_tx(cdev, processed, processed_frame_len);
 
        return err;
 }
                if (ir & IR_TC) {
                        /* Transmission Complete Interrupt*/
                        u32 timestamp = 0;
+                       unsigned int frame_len;
 
                        if (cdev->is_peripheral)
                                timestamp = m_can_get_timestamp(cdev);
-                       m_can_tx_update_stats(cdev, 0, timestamp);
-                       m_can_finish_tx(cdev, 1);
+                       frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+                       m_can_finish_tx(cdev, 1, frame_len);
                }
        } else  {
                if (ir & (IR_TEFN | IR_TEFW)) {
        u32 cccr, fdflags;
        int err;
        u32 putidx;
+       unsigned int frame_len = can_skb_get_frame_len(skb);
 
        /* Generate ID field for TX buffer Element */
        /* Common to all supported M_CAN versions */
                }
                m_can_write(cdev, M_CAN_TXBTIE, 0x1);
 
-               can_put_echo_skb(skb, dev, 0, 0);
+               can_put_echo_skb(skb, dev, 0, frame_len);
 
                m_can_write(cdev, M_CAN_TXBAR, 0x1);
                /* End of xmit function for version 3.0.x */
                /* Push loopback echo.
                 * Will be looped back on TX interrupt based on message marker
                 */
-               can_put_echo_skb(skb, dev, putidx, 0);
+               can_put_echo_skb(skb, dev, putidx, frame_len);
 
                /* Enable TX FIFO element to start transfer  */
                m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
                                    struct net_device *dev)
 {
        struct m_can_classdev *cdev = netdev_priv(dev);
+       unsigned int frame_len;
        netdev_tx_t ret;
 
        if (can_dev_dropped_skb(dev, skb))
                return NETDEV_TX_OK;
 
+       frame_len = can_skb_get_frame_len(skb);
+
        if (cdev->can.state == CAN_STATE_BUS_OFF) {
                m_can_clean(cdev->net);
                return NETDEV_TX_OK;
        if (ret != NETDEV_TX_OK)
                return ret;
 
+       netdev_sent_queue(dev, frame_len);
+
        if (cdev->is_peripheral)
-               return m_can_start_peripheral_xmit(cdev, skb);
+               ret = m_can_start_peripheral_xmit(cdev, skb);
        else
-               return m_can_tx_handler(cdev, skb);
+               ret = m_can_tx_handler(cdev, skb);
+
+       if (ret != NETDEV_TX_OK)
+               netdev_completed_queue(dev, 1, frame_len);
+
+       return ret;
 }
 
 static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)