  * @secys: linked list of SecY's on the underlying device
  * @gro_cells: pointer to the Generic Receive Offload cell
  * @offload: status of offloading on the MACsec device
+ * @insert_tx_tag: when offloading, the device requires an additional
+ *     tag to be inserted
  */
 struct macsec_dev {
        struct macsec_secy secy;
        struct list_head secys;
        struct gro_cells gro_cells;
        enum macsec_offload offload;
+       bool insert_tx_tag;
 };
 
 /**
        return false;
 }
 
+static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
+                               const struct macsec_ops *ops)
+{
+       return macsec->offload == MACSEC_OFFLOAD_PHY &&
+               ops->mdo_insert_tx_tag;
+}
+
+static void macsec_set_head_tail_room(struct net_device *dev)
+{
+       struct macsec_dev *macsec = macsec_priv(dev);
+       struct net_device *real_dev = macsec->real_dev;
+       int needed_headroom, needed_tailroom;
+       const struct macsec_ops *ops;
+
+       ops = macsec_get_ops(macsec, NULL);
+       if (ops) {
+               needed_headroom = ops->needed_headroom;
+               needed_tailroom = ops->needed_tailroom;
+       } else {
+               needed_headroom = MACSEC_NEEDED_HEADROOM;
+               needed_tailroom = MACSEC_NEEDED_TAILROOM;
+       }
+
+       dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
+       dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
+}
+
 static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
 {
        enum macsec_offload prev_offload;
        ctx.secy = &macsec->secy;
        ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
                                            : macsec_offload(ops->mdo_add_secy, &ctx);
-       if (ret)
+       if (ret) {
                macsec->offload = prev_offload;
+               return ret;
+       }
+
+       macsec_set_head_tail_room(dev);
+       macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
 
        return ret;
 }
        .resv_start_op  = MACSEC_CMD_UPD_OFFLOAD + 1,
 };
 
+static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
+                                           struct net_device *dev)
+{
+       struct macsec_dev *macsec = macsec_priv(dev);
+       const struct macsec_ops *ops;
+       struct phy_device *phydev;
+       struct macsec_context ctx;
+       int skb_final_len;
+       int err;
+
+       ops = macsec_get_ops(macsec, &ctx);
+       skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
+               ops->needed_tailroom;
+       if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
+               err = -EINVAL;
+               goto cleanup;
+       }
+
+       phydev = macsec->real_dev->phydev;
+
+       err = skb_ensure_writable_head_tail(skb, dev);
+       if (unlikely(err < 0))
+               goto cleanup;
+
+       err = ops->mdo_insert_tx_tag(phydev, skb);
+       if (unlikely(err))
+               goto cleanup;
+
+       return skb;
+cleanup:
+       kfree_skb(skb);
+       return ERR_PTR(err);
+}
+
 static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
 {
                skb_dst_drop(skb);
                dst_hold(&md_dst->dst);
                skb_dst_set(skb, &md_dst->dst);
+
+               if (macsec->insert_tx_tag) {
+                       skb = macsec_insert_tx_tag(skb, dev);
+                       if (IS_ERR(skb)) {
+                               DEV_STATS_INC(dev, tx_dropped);
+                               return NETDEV_TX_OK;
+                       }
+               }
+
                skb->dev = macsec->real_dev;
                return dev_queue_xmit(skb);
        }
        dev->features = real_dev->features & MACSEC_FEATURES;
        dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
 
-       dev->needed_headroom = real_dev->needed_headroom +
-                              MACSEC_NEEDED_HEADROOM;
-       dev->needed_tailroom = real_dev->needed_tailroom +
-                              MACSEC_NEEDED_TAILROOM;
+       macsec_set_head_tail_room(dev);
 
        if (is_zero_ether_addr(dev->dev_addr))
                eth_hw_addr_inherit(dev, real_dev);
                        err = macsec_offload(ops->mdo_add_secy, &ctx);
                        if (err)
                                goto del_dev;
+
+                       macsec->insert_tx_tag =
+                               macsec_needs_tx_tag(macsec, ops);
                }
        }
 
 
  * @mdo_get_tx_sa_stats: called when TX SA stats are read
  * @mdo_get_rx_sc_stats: called when RX SC stats are read
  * @mdo_get_rx_sa_stats: called when RX SA stats are read
+ * @mdo_insert_tx_tag: called to insert the TX tag
+ * @needed_headroom: number of bytes reserved at the beginning of the sk_buff
+ *     for the TX tag
+ * @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
+ *     TX tag
  */
 struct macsec_ops {
        /* Device wide */
        int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
        int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
        int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+       /* Offload tag */
+       int (*mdo_insert_tx_tag)(struct phy_device *phydev,
+                                struct sk_buff *skb);
+       unsigned int needed_headroom;
+       unsigned int needed_tailroom;
 };
 
 void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
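
For illustration only, a minimal sketch of how a PHY driver could wire up the
new fields. The contract shown (headroom reserved through @needed_headroom,
head made writable with skb_ensure_writable_head_tail() before
@mdo_insert_tx_tag is called) comes from the core changes above; the driver
name "foo", FOO_TX_TAG_LEN and the tag contents are hypothetical, not taken
from an in-tree driver.

#define FOO_TX_TAG_LEN	8

static int foo_insert_tx_tag(struct phy_device *phydev, struct sk_buff *skb)
{
	u8 *tag;

	/* The macsec core reserves FOO_TX_TAG_LEN bytes of headroom through
	 * needed_headroom and makes the head writable with
	 * skb_ensure_writable_head_tail() before calling this op, so the
	 * push below cannot fail.
	 */
	tag = skb_push(skb, FOO_TX_TAG_LEN);
	memset(tag, 0, FOO_TX_TAG_LEN);
	tag[0] = 0x01;	/* hypothetical device-specific metadata */

	return 0;
}

static const struct macsec_ops foo_macsec_ops = {
	/* ... the other mdo_* callbacks ... */
	.mdo_insert_tx_tag	= foo_insert_tx_tag,
	.needed_headroom	= FOO_TX_TAG_LEN,
	.needed_tailroom	= 0,
};

With such a driver, macsec_needs_tx_tag() evaluates true for
MACSEC_OFFLOAD_PHY, macsec_set_head_tail_room() advertises the extra headroom
on the macsec netdev, and macsec_insert_tx_tag() invokes foo_insert_tx_tag()
on every offloaded frame before it is handed to the real device.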