#include <net/gro_cells.h>
 #include <net/macsec.h>
 #include <linux/phy.h>
+#include <linux/byteorder/generic.h>
 
 #include <uapi/linux/if_macsec.h>
 
             sc;                                        \
             sc = rtnl_dereference(sc->next))
 
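+/* true when both PNs fall in the same half of the 32-bit numbering space */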
+#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
+
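+/* 96-bit GCM IV layout used by the XPN cipher suites (IEEE 802.1AEbw) */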
+struct gcm_iv_xpn {
+       union {
+               u8 short_secure_channel_id[4];
+               ssci_t ssci;
+       };
+       __be64 pn;
+} __packed;
+
 struct gcm_iv {
        union {
                u8 secure_channel_id[8];
        return __macsec_get_ops(macsec->offload, macsec, ctx);
 }
 
-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 {
        struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
        int len = skb->len - 2 * ETH_ALEN;
        if (h->unused)
                return false;
 
-       /* rx.pn != 0 (figure 10-5) */
-       if (!h->packet_number)
+       /* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
+       if (!h->packet_number && !xpn)
                return false;
 
        /* length check, f) g) h) i) */
 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
 
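+/* Build the XPN IV: XOR the 32-bit SSCI and the 64-bit PN with the
+ * corresponding parts of the SA's 12-byte salt.
+ */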
+static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
+                              salt_t salt)
+{
+       struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
+
+       gcm_iv->ssci = ssci ^ salt.ssci;
+       gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
+}
+
 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 {
        struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
 }
 EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
 
-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
+                           struct macsec_secy *secy)
 {
-       u32 pn;
+       pn_t pn;
 
        spin_lock_bh(&tx_sa->lock);
-       pn = tx_sa->next_pn;
 
-       tx_sa->next_pn++;
+       pn = tx_sa->next_pn_halves;
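+       /* next_pn and next_pn_halves overlay the same counter: XPN advances
+        * the full 64 bits, non-XPN only the lower 32.
+        */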
+       if (secy->xpn)
+               tx_sa->next_pn++;
+       else
+               tx_sa->next_pn_halves.lower++;
+
        if (tx_sa->next_pn == 0)
                __macsec_pn_wrapped(secy, tx_sa);
        spin_unlock_bh(&tx_sa->lock);
        struct macsec_tx_sa *tx_sa;
        struct macsec_dev *macsec = macsec_priv(dev);
        bool sci_present;
-       u32 pn;
+       pn_t pn;
 
        secy = &macsec->secy;
        tx_sc = &secy->tx_sc;
        memmove(hh, eth, 2 * ETH_ALEN);
 
        pn = tx_sa_update_pn(tx_sa, secy);
-       if (pn == 0) {
+       if (pn.full64 == 0) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-ENOLINK);
        }
-       macsec_fill_sectag(hh, secy, pn, sci_present);
+       macsec_fill_sectag(hh, secy, pn.lower, sci_present);
        macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 
        skb_put(skb, secy->icv_len);
                return ERR_PTR(-ENOMEM);
        }
 
-       macsec_fill_iv(iv, secy->sci, pn);
+       if (secy->xpn)
+               macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
+       else
+               macsec_fill_iv(iv, secy->sci, pn.lower);
 
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        u32 lowest_pn = 0;
 
        spin_lock(&rx_sa->lock);
-       if (rx_sa->next_pn >= secy->replay_window)
-               lowest_pn = rx_sa->next_pn - secy->replay_window;
+       if (rx_sa->next_pn_halves.lower >= secy->replay_window)
+               lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
 
        /* Now perform replay protection check again
         * (see IEEE 802.1AE-2006 figure 10-5)
         */
-       if (secy->replay_protect && pn < lowest_pn) {
+       if (secy->replay_protect && pn < lowest_pn &&
+           (!secy->xpn || pn_same_half(pn, lowest_pn))) {
                spin_unlock(&rx_sa->lock);
                u64_stats_update_begin(&rxsc_stats->syncp);
                rxsc_stats->stats.InPktsLate++;
                }
                u64_stats_update_end(&rxsc_stats->syncp);
 
-               if (pn >= rx_sa->next_pn)
-                       rx_sa->next_pn = pn + 1;
+               /* Instead of "pn >=" - to support PN overflow with XPN */
+               if (pn + 1 > rx_sa->next_pn_halves.lower) {
+                       rx_sa->next_pn_halves.lower = pn + 1;
+               } else if (secy->xpn &&
+                          !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
+                       rx_sa->next_pn_halves.upper++;
+                       rx_sa->next_pn_halves.lower = pn + 1;
+               }
+
                spin_unlock(&rx_sa->lock);
        }
 
        unsigned char *iv;
        struct aead_request *req;
        struct macsec_eth_header *hdr;
+       u32 hdr_pn;
        u16 icv_len = secy->icv_len;
 
        macsec_skb_cb(skb)->valid = false;
        }
 
        hdr = (struct macsec_eth_header *)skb->data;
-       macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+       hdr_pn = ntohl(hdr->packet_number);
+
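+       /* The SecTAG carries only the lower 32 bits of the PN; for XPN,
+        * recover the full 64-bit value, bumping the upper half if the
+        * lower half has wrapped since next_pn was last updated.
+        */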
+       if (secy->xpn) {
+               pn_t recovered_pn = rx_sa->next_pn_halves;
+
+               recovered_pn.lower = hdr_pn;
+               if (hdr_pn < rx_sa->next_pn_halves.lower &&
+                   !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
+                       recovered_pn.upper++;
+
+               macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
+                                  rx_sa->key.salt);
+       } else {
+               macsec_fill_iv(iv, sci, hdr_pn);
+       }
 
        sg_init_table(sg, ret);
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        struct macsec_rxh_data *rxd;
        struct macsec_dev *macsec;
        sci_t sci;
-       u32 pn;
+       u32 hdr_pn;
        bool cbit;
        struct pcpu_rx_sc_stats *rxsc_stats;
        struct pcpu_secy_stats *secy_stats;
        secy_stats = this_cpu_ptr(macsec->stats);
        rxsc_stats = this_cpu_ptr(rx_sc->stats);
 
-       if (!macsec_validate_skb(skb, secy->icv_len)) {
+       if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
                u64_stats_update_begin(&secy_stats->syncp);
                secy_stats->stats.InPktsBadTag++;
                u64_stats_update_end(&secy_stats->syncp);
        }
 
        /* First, PN check to avoid decrypting obviously wrong packets */
-       pn = ntohl(hdr->packet_number);
+       hdr_pn = ntohl(hdr->packet_number);
        if (secy->replay_protect) {
                bool late;
 
                spin_lock(&rx_sa->lock);
-               late = rx_sa->next_pn >= secy->replay_window &&
-                      pn < (rx_sa->next_pn - secy->replay_window);
+               late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
+                      hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
+
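+               /* For XPN, a PN that moved into the other half of the
+                * numbering space is a wrap-around, not a late frame.
+                */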
+               if (secy->xpn)
+                       late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
                spin_unlock(&rx_sa->lock);
 
                if (late) {
                return RX_HANDLER_CONSUMED;
        }
 
-       if (!macsec_post_decrypt(skb, secy, pn))
+       if (!macsec_post_decrypt(skb, secy, hdr_pn))
                goto drop;
 
 deliver:
 
        if (tb_sa[MACSEC_SA_ATTR_PN]) {
                spin_lock_bh(&rx_sa->lock);
-               rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+               rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                spin_unlock_bh(&rx_sa->lock);
        }
 
        }
 
        spin_lock_bh(&tx_sa->lock);
-       tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+       tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
        spin_unlock_bh(&tx_sa->lock);
 
        if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
        u8 assoc_num;
        struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
        bool was_operational, was_active;
-       u32 prev_pn = 0;
+       pn_t prev_pn;
        int ret = 0;
 
+       prev_pn.full64 = 0;
+
        if (!attrs[MACSEC_ATTR_IFINDEX])
                return -EINVAL;
 
 
        if (tb_sa[MACSEC_SA_ATTR_PN]) {
                spin_lock_bh(&tx_sa->lock);
-               prev_pn = tx_sa->next_pn;
-               tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+               prev_pn = tx_sa->next_pn_halves;
+               tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                spin_unlock_bh(&tx_sa->lock);
        }
 
 cleanup:
        if (tb_sa[MACSEC_SA_ATTR_PN]) {
                spin_lock_bh(&tx_sa->lock);
-               tx_sa->next_pn = prev_pn;
+               tx_sa->next_pn_halves = prev_pn;
                spin_unlock_bh(&tx_sa->lock);
        }
        tx_sa->active = was_active;
        struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
        struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
        bool was_active;
-       u32 prev_pn = 0;
+       pn_t prev_pn;
        int ret = 0;
 
+       prev_pn.full64 = 0;
+
        if (!attrs[MACSEC_ATTR_IFINDEX])
                return -EINVAL;
 
 
        if (tb_sa[MACSEC_SA_ATTR_PN]) {
                spin_lock_bh(&rx_sa->lock);
-               prev_pn = rx_sa->next_pn;
-               rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+               prev_pn = rx_sa->next_pn_halves;
+               rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                spin_unlock_bh(&rx_sa->lock);
        }
 
 cleanup:
        if (tb_sa[MACSEC_SA_ATTR_PN]) {
                spin_lock_bh(&rx_sa->lock);
-               rx_sa->next_pn = prev_pn;
+               rx_sa->next_pn_halves = prev_pn;
                spin_unlock_bh(&rx_sa->lock);
        }
        rx_sa->active = was_active;
                }
 
                if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
-                   nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
+                   nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
                    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
                    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
                        nla_nest_cancel(skb, txsa_nest);
                        nla_nest_end(skb, attr);
 
                        if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
-                           nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+                           nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
                            nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
                            nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
                                nla_nest_cancel(skb, rxsa_nest);