return netif_rx(skb);
 }
 
+/* post received skb with native 64-bit hw timestamp
+ *
+ * Unlike peak_usb_netif_rx(), no time_ref synchronization is involved:
+ * the device reports a full 64-bit counter split into two 32-bit words,
+ * which this code treats as a microsecond count (see NSEC_PER_USEC
+ * scaling below). Returns the netif_rx() verdict for the skb.
+ */
+int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high)
+{
+       struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+       u64 ns_ts;
+
+       /* rebuild the 64-bit hw counter from its two 32-bit halves */
+       ns_ts = (u64)ts_high << 32 | ts_low;
+       /* scale µs -> ns for the ktime-based hw timestamp field */
+       ns_ts *= NSEC_PER_USEC;
+       hwts->hwtstamp = ns_to_ktime(ns_ts);
+
+       return netif_rx(skb);
+}
+
 /*
  * callback for bulk Rx urb
  */
 
 void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
 int peak_usb_netif_rx(struct sk_buff *skb,
                      struct peak_time_ref *time_ref, u32 ts_low);
+int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
 void peak_usb_async_complete(struct urb *urb);
 void peak_usb_restart_complete(struct peak_usb_device *dev);
 
 
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cfd->len;
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+       peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
+                            le32_to_cpu(rm->ts_high));
 
        return 0;
 }
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cf->len;
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+       peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
+                            le32_to_cpu(sm->ts_high));
 
        return 0;
 }
        cf->can_id |= CAN_ERR_CRTL;
        cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
+       peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low),
+                            le32_to_cpu(ov->ts_high));
 
        netdev->stats.rx_over_errors++;
        netdev->stats.rx_errors++;