        struct ionic_dev *idev = &lif->ionic->idev;
        unsigned long irqflags;
        unsigned int flags = 0;
+       int rx_work = 0;
+       int tx_work = 0;
        int n_work = 0;
        int a_work = 0;
        int work_done;
+       int credits;
 
        if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
                n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
                                          ionic_notifyq_service, NULL, NULL);

        spin_lock_irqsave(&lif->adminq_lock, irqflags);
        if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
                a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
                                          ionic_adminq_service, NULL, NULL);
        spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
 
-       work_done = max(n_work, a_work);
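+       /* service the hardware timestamping queues from this same napi */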
+       if (lif->hwstamp_rxq)
+               rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
+                                          ionic_rx_service, NULL, NULL);
+
+       if (lif->hwstamp_txq)
+               tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
+                                          ionic_tx_service, NULL, NULL);
+
+       work_done = max(max(n_work, a_work), max(rx_work, tx_work));
        if (work_done < budget && napi_complete_done(napi, work_done)) {
                flags |= IONIC_INTR_CRED_UNMASK;
                intr->rearm_count++;
        }
 
        if (work_done || flags) {
                flags |= IONIC_INTR_CRED_RESET_COALESCE;
-               ionic_intr_credits(idev->intr_ctrl,
-                                  intr->index,
-                                  n_work + a_work, flags);
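+               /* napi completion above uses the max work done, while the
+                * interrupt credits return the sum across all four queues
+                */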
+               credits = n_work + a_work + rx_work + tx_work;
+               ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
        }
 
        return work_done;
        int err;
 
        ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
+
        err = ionic_adminq_post_wait(lif, &ctx);
        if (err)
                return err;
                }
        }
        lif->rx_mode = 0;
+
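+       /* clean up the timestamping queues after the regular queues */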
+       if (lif->hwstamp_txq) {
+               ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
+               ionic_tx_flush(&lif->hwstamp_txq->cq);
+               ionic_tx_empty(&lif->hwstamp_txq->q);
+       }
+
+       if (lif->hwstamp_rxq) {
+               ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
+               ionic_rx_empty(&lif->hwstamp_rxq->q);
+       }
 }
 
 static void ionic_txrx_free(struct ionic_lif *lif)
                }
        }
 
+       if (lif->hwstamp_rxq) {
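+               /* restock the Rx ring before enabling the queue */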
+               ionic_rx_fill(&lif->hwstamp_rxq->q);
+               err = ionic_qcq_enable(lif->hwstamp_rxq);
+               if (err)
+                       goto err_out_hwstamp_rx;
+       }
+
+       if (lif->hwstamp_txq) {
+               err = ionic_qcq_enable(lif->hwstamp_txq);
+               if (err)
+                       goto err_out_hwstamp_tx;
+       }
+
        return 0;
 
+err_out_hwstamp_tx:
+       if (lif->hwstamp_rxq)
+               derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
+err_out_hwstamp_rx:
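+       /* reset i so the err_out loop below unwinds all the regular queues */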
+       i = lif->nxqs;
 err_out:
        while (i--) {
                derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
                        goto err_txrx_free;
        }
 
+       /* restore the hardware timestamping queues */
+       ionic_lif_hwstamp_set(lif, NULL);
+
        clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
        ionic_link_status_check_request(lif, CAN_SLEEP);
        netif_device_attach(lif->netdev);
 
 #include "ionic_txrx.h"
 
 
-static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
-
 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
                                  ionic_desc_cb cb_func, void *cb_arg)
 {
                stats->vlan_stripped++;
        }
 
+       if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
+               __le64 *cq_desc_hwstamp;
+               u64 hwstamp;
+
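+               /* the timestamp is reported at a fixed offset back from
+                * the end of the enlarged completion descriptor
+                */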
+               cq_desc_hwstamp =
+                       cq_info->cq_desc +
+                       qcq->cq.desc_size -
+                       sizeof(struct ionic_rxq_comp) -
+                       IONIC_HWSTAMP_CQ_NEGOFFSET;
+
+               hwstamp = le64_to_cpu(*cq_desc_hwstamp);
+
+               if (hwstamp != IONIC_HWSTAMP_INVALID) {
+                       skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
+                       stats->hwstamp_valid++;
+               } else {
+                       stats->hwstamp_invalid++;
+               }
+       }
+
        if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
                napi_gro_receive(&qcq->napi, skb);
        else
                napi_gro_frags(&qcq->napi);
 }
 
-static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 {
        struct ionic_queue *q = cq->bound_q;
        struct ionic_desc_info *desc_info;
 {
        struct ionic_buf_info *buf_info = desc_info->bufs;
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
+       struct ionic_qcq *qcq = q_to_qcq(q);
+       struct sk_buff *skb = cb_arg;
        struct device *dev = q->dev;
-       u16 queue_index;
        unsigned int i;
+       u16 qi;
 
        if (desc_info->nbufs) {
                dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
                                 buf_info->len, DMA_TO_DEVICE);
                buf_info++;
                for (i = 1; i < desc_info->nbufs; i++, buf_info++)
                        dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
                                       buf_info->len, DMA_TO_DEVICE);
        }
 
-       if (cb_arg) {
-               struct sk_buff *skb = cb_arg;
+       if (!skb)
+               return;
 
-               queue_index = skb_get_queue_mapping(skb);
-               if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
-                                                     queue_index))) {
-                       netif_wake_subqueue(q->lif->netdev, queue_index);
-                       q->wake++;
-               }
+       qi = skb_get_queue_mapping(skb);
+
+       if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
+               if (cq_info) {
+                       struct skb_shared_hwtstamps hwts = {};
+                       __le64 *cq_desc_hwstamp;
+                       u64 hwstamp;
+
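+                       /* same completion layout as on the Rx side */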
+                       cq_desc_hwstamp =
+                               cq_info->cq_desc +
+                               qcq->cq.desc_size -
+                               sizeof(struct ionic_txq_comp) -
+                               IONIC_HWSTAMP_CQ_NEGOFFSET;
+
+                       hwstamp = le64_to_cpu(*cq_desc_hwstamp);
 
-               desc_info->bytes = skb->len;
-               stats->clean++;
+                       if (hwstamp != IONIC_HWSTAMP_INVALID) {
+                               hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
 
-               dev_consume_skb_any(skb);
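+                               /* flag the skb so the stack knows a hw
+                                * timestamp is coming, then let
+                                * skb_tstamp_tx() deliver the clone to the
+                                * socket error queue with the hw time
+                                */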
+                               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+                               skb_tstamp_tx(skb, &hwts);
+
+                               stats->hwstamp_valid++;
+                       } else {
+                               stats->hwstamp_invalid++;
+                       }
+               }
+
+       } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
+               netif_wake_subqueue(q->lif->netdev, qi);
+               q->wake++;
        }
+
+       desc_info->bytes = skb->len;
+       stats->clean++;
+
+       dev_consume_skb_any(skb);
 }
 
-static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 {
        struct ionic_queue *q = cq->bound_q;
        struct ionic_desc_info *desc_info;
                desc_info->cb_arg = NULL;
        } while (index != le16_to_cpu(comp->comp_index));
 
-       if (pkts && bytes)
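+       /* a timestamping queue is not one of the netdev's real tx queues,
+        * so it must stay out of the BQL accounting
+        */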
+       if (pkts && bytes && !(q->features & IONIC_TXQ_F_HWSTAMP))
                netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
 
        return true;
                desc_info->cb_arg = NULL;
        }
 
-       if (pkts && bytes)
+       if (pkts && bytes && !(q->features & IONIC_TXQ_F_HWSTAMP))
                netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
 }
 
 
        if (start) {
                skb_tx_timestamp(skb);
-               netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+               if (!(q->features & IONIC_TXQ_F_HWSTAMP))
+                       netdev_tx_sent_queue(q_to_ndq(q), skb->len);
                ionic_txq_post(q, false, ionic_tx_clean, skb);
        } else {
                ionic_txq_post(q, done, NULL, NULL);
        stats->pkts++;
        stats->bytes += skb->len;
 
-       netdev_tx_sent_queue(q_to_ndq(q), skb->len);
+       if (!(q->features & IONIC_TXQ_F_HWSTAMP))
+               netdev_tx_sent_queue(q_to_ndq(q), skb->len);
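+       /* ring the doorbell now unless the stack says more skbs are coming */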
        ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
 
        return 0;
        return stopped;
 }
 
+static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
+                                           struct net_device *netdev)
+{
+       struct ionic_lif *lif = netdev_priv(netdev);
+       struct ionic_queue *q = &lif->hwstamp_txq->q;
+       int err, ndescs;
+
+       /* Does not stop/start txq, because we post to a separate tx queue
+        * for timestamping, and if a packet can't be posted immediately to
+        * the timestamping queue, it is dropped.
+        */
+
+       ndescs = ionic_tx_descs_needed(q, skb);
+       if (unlikely(ndescs < 0))
+               goto err_out_drop;
+
+       if (unlikely(!ionic_q_has_space(q, ndescs)))
+               goto err_out_drop;
+
+       if (skb_is_gso(skb))
+               err = ionic_tx_tso(q, skb);
+       else
+               err = ionic_tx(q, skb);
+
+       if (err)
+               goto err_out_drop;
+
+       return NETDEV_TX_OK;
+
+err_out_drop:
+       q->drop++;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
 netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        u16 queue_index = skb_get_queue_mapping(skb);
                return NETDEV_TX_OK;
        }
 
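+       /* the stack sets SKBTX_HW_TSTAMP when the socket requested a hw
+        * tx timestamp; steer such skbs to the dedicated timestamping queue
+        */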
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               if (lif->hwstamp_txq)
+                       return ionic_start_hwstamp_xmit(skb, netdev);
+
        if (unlikely(queue_index >= lif->nxqs))
                queue_index = 0;
        q = &lif->txqcqs[queue_index]->q;